[bugfix] Fix MHA model runtime error in aclgraph mode (#5397)
### What this PR does / why we need it?
Currently, MHA models (e.g. MiniCPM-2B, Baichuan-7B) fail at runtime in piecewise graph mode, with an error similar to:
```
(E89999): When layout is TND and PA not enabled, keyT(8) and valueT(8) must be equal to the last element of actualSeqenceLengthKV(5)[FUNC:CheckInputShapeWhenLayoutIsTND][FILE:prompt_flash_attention_tiling.cpp][LINE:3618]
```
The error occurs because the query/key/value tensors are also padded in the prefill stage, so their shapes become inconsistent with `actual_seq_lengths`. This PR adds unpadding logic for the key and value tensors.
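For context, a minimal sketch of the shape invariant the fix restores; the sizes and tensor names below are hypothetical illustrations, not taken from the vLLM-Ascend source. In TND layout the token dimension of key/value must equal the last entry of the actual-sequence-length metadata, so padded rows have to be sliced off before the fused kernel runs:
```python
import torch

# Hypothetical example: 5 real tokens padded to 8 slots for graph capture.
padded_tokens, num_heads, head_dim = 8, 2, 64
actual_seq_lengths_kv = [5]  # last entry = total number of real tokens

key = torch.randn(padded_tokens, num_heads, head_dim)    # keyT = 8 (padded)
value = torch.randn(padded_tokens, num_heads, head_dim)  # valueT = 8 (padded)

# TND layout requires keyT/valueT == actual_seq_lengths_kv[-1]; here 8 != 5,
# which is exactly the E89999 mismatch the fused kernel reports.
num_tokens = actual_seq_lengths_kv[-1]

# The fix: strip the padded rows so the shapes agree with the metadata.
key = key[:num_tokens]
value = value[:num_tokens]
assert key.shape[0] == num_tokens and value.shape[0] == num_tokens
```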
- vLLM version: release/v0.13.0
- vLLM main: 254f6b9867
Signed-off-by: Wang Kunpeng <1289706727@qq.com>
```diff
--- a/.github/workflows/_e2e_test.yaml
+++ b/.github/workflows/_e2e_test.yaml
@@ -115,8 +115,7 @@ jobs:
           pytest -sv --durations=0 tests/e2e/singlecard/test_sampler.py
           pytest -sv --durations=0 tests/e2e/singlecard/test_vlm.py
           pytest -sv --durations=0 tests/e2e/singlecard/test_xlite.py
-          # TODO: revert me when minicpm is fixed
-          # pytest -sv --durations=0 tests/e2e/singlecard/test_models.py
+          pytest -sv --durations=0 tests/e2e/singlecard/test_models.py
           pytest -sv --durations=0 tests/e2e/singlecard/pooling/
           pytest -sv --durations=0 tests/e2e/singlecard/compile/test_norm_quant_fusion.py
           pytest -sv --durations=0 tests/e2e/singlecard/test_multistream_overlap_shared_expert.py
```
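With the fix in place, the workflow change drops the temporary TODO and re-enables `tests/e2e/singlecard/test_models.py` in the single-card e2e job.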
```diff
@@ -19,18 +19,28 @@
 import os
 
+import pytest
+from modelscope import snapshot_download  # type: ignore
+
 from tests.e2e.conftest import VllmRunner
 
 os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "spawn"
 
+# Note: MiniCPM-2B is a MHA model, MiniCPM4-0.5B is a GQA model
+MINICPM_MODELS = [
+    "openbmb/MiniCPM-2B-sft-bf16",
+    "OpenBMB/MiniCPM4-0.5B",
+]
+
 
-def test_minicpm_2b() -> None:
+@pytest.mark.parametrize("model", MINICPM_MODELS)
+def test_minicpm(model) -> None:
     example_prompts = [
         "Hello, my name is",
     ]
     max_tokens = 5
 
-    with VllmRunner("openbmb/MiniCPM-2B-sft-bf16",
+    with VllmRunner(snapshot_download(model),
                     max_model_len=512,
                     gpu_memory_utilization=0.7) as runner:
         runner.generate_greedy(example_prompts, max_tokens)
 
```
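The test is parametrized over both checkpoints so the regression stays covered for an MHA model (MiniCPM-2B) and a GQA model (MiniCPM4-0.5B), with `snapshot_download` resolving the weights from ModelScope.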
```diff
@@ -532,6 +532,9 @@ class AscendAttentionBackendImpl(AttentionImpl):
             = self._get_fia_params(key, value, attn_metadata)
         num_tokens = attn_metadata.actual_seq_lengths_q[-1]
         query = query[:num_tokens]
+        if attn_metadata.attn_state == AscendAttentionState.PrefillNoCache:
+            key = key[:num_tokens]
+            value = value[:num_tokens]
         # Get workspace from cache or calculate it if not present.
         attn_output, _ = torch_npu.npu_fused_infer_attention_score(
             query=query,
```
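Note that `query` was already sliced to `num_tokens` unconditionally in the existing code; the new slicing of `key` and `value` is guarded on `AscendAttentionState.PrefillNoCache`, which appears to correspond to the failing case in the error text ("layout is TND and PA not enabled"), i.e. prefill without a populated KV cache.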