diff --git a/.github/workflows/_e2e_test.yaml b/.github/workflows/_e2e_test.yaml
index 56df80ad..6d408f1c 100644
--- a/.github/workflows/_e2e_test.yaml
+++ b/.github/workflows/_e2e_test.yaml
@@ -115,8 +115,7 @@ jobs:
           pytest -sv --durations=0 tests/e2e/singlecard/test_sampler.py
           pytest -sv --durations=0 tests/e2e/singlecard/test_vlm.py
           pytest -sv --durations=0 tests/e2e/singlecard/test_xlite.py
-          # TODO: revert me when minicpm is fixed
-          # pytest -sv --durations=0 tests/e2e/singlecard/test_models.py
+          pytest -sv --durations=0 tests/e2e/singlecard/test_models.py
           pytest -sv --durations=0 tests/e2e/singlecard/pooling/
           pytest -sv --durations=0 tests/e2e/singlecard/compile/test_norm_quant_fusion.py
           pytest -sv --durations=0 tests/e2e/singlecard/test_multistream_overlap_shared_expert.py
diff --git a/tests/e2e/singlecard/test_models.py b/tests/e2e/singlecard/test_models.py
index 20fe6f77..8068eda5 100644
--- a/tests/e2e/singlecard/test_models.py
+++ b/tests/e2e/singlecard/test_models.py
@@ -19,18 +19,28 @@
 import os
 
+import pytest
+from modelscope import snapshot_download  # type: ignore
+
 from tests.e2e.conftest import VllmRunner
 
 os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "spawn"
 
+# Note: MiniCPM-2B is a MHA model, MiniCPM4-0.5B is a GQA model
+MINICPM_MODELS = [
+    "openbmb/MiniCPM-2B-sft-bf16",
+    "OpenBMB/MiniCPM4-0.5B",
+]
 
-def test_minicpm_2b() -> None:
+
+@pytest.mark.parametrize("model", MINICPM_MODELS)
+def test_minicpm(model) -> None:
     example_prompts = [
         "Hello, my name is",
     ]
     max_tokens = 5
-    with VllmRunner("openbmb/MiniCPM-2B-sft-bf16",
+    with VllmRunner(snapshot_download(model),
                     max_model_len=512,
                     gpu_memory_utilization=0.7) as runner:
         runner.generate_greedy(example_prompts, max_tokens)
diff --git a/vllm_ascend/attention/attention_v1.py b/vllm_ascend/attention/attention_v1.py
index 001d58fb..0aef154a 100644
--- a/vllm_ascend/attention/attention_v1.py
+++ b/vllm_ascend/attention/attention_v1.py
@@ -532,6 +532,9 @@ class AscendAttentionBackendImpl(AttentionImpl):
             = self._get_fia_params(key, value, attn_metadata)
         num_tokens = attn_metadata.actual_seq_lengths_q[-1]
         query = query[:num_tokens]
+        if attn_metadata.attn_state == AscendAttentionState.PrefillNoCache:
+            key = key[:num_tokens]
+            value = value[:num_tokens]
         # Get workspace from cache or calculate it if not present.
         attn_output, _ = torch_npu.npu_fused_infer_attention_score(
             query=query,
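
For reviewers, a minimal standalone sketch of why the attention_v1.py hunk also truncates key and value in the PrefillNoCache path. This is not the vllm-ascend implementation; the tensor shapes, lengths, and variable names below are illustrative assumptions only.

```python
# Hedged sketch: with no KV cache during prefill, key/value carry the same
# padding rows as query, so all three must be sliced to the real token count.
import torch

num_padded, num_heads, head_dim = 8, 2, 4
actual_seq_lengths_q = [3, 6]          # assumed cumulative query lengths
num_tokens = actual_seq_lengths_q[-1]  # 6 real tokens, 2 padding rows

query = torch.randn(num_padded, num_heads, head_dim)
key = torch.randn(num_padded, num_heads, head_dim)
value = torch.randn(num_padded, num_heads, head_dim)

# Slicing only query (as before this change) would leave key/value with
# padding rows; slicing all three keeps the attention inputs consistent.
query, key, value = query[:num_tokens], key[:num_tokens], value[:num_tokens]
assert query.shape[0] == key.shape[0] == value.shape[0] == num_tokens
```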