[CI] refactor e2e test (#4799)

### What this PR does / why we need it?
This PR updates the CI configuration and adjusts a set of end-to-end
(e2e) tests under tests/e2e/multicard to refactor the test suite and
keep it compatible with the current codebase and CI workflows.

1. tests/e2e/multicard/test_prefix_caching.py: change the model to
Qwen3-8B and rename the test case
2. tests/e2e/multicard/test_quantization.py: rename the test case
3. tests/e2e/multicard/test_qwen3_moe.py: remove a duplicate test and
rename the test cases
4. tests/e2e/multicard/test_qwen3_next.py: rename the test cases,
replace the W8A8-Pruning model with the W8A8 model, and remove the
enforce_eager parameter
5. tests/e2e/multicard/test_shared_expert_dp.py: rename the test case
and remove the enforce_eager parameter
6. tests/e2e/multicard/test_single_request_aclgraph.py: rename the test
case and change the model from Qwen3-30B to Qwen3-0.6B (a minimal
sketch follows this list)
7. tests/e2e/multicard/test_torchair_graph_mode.py: delete the
torchair test cases
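
For illustration, here is a minimal sketch of what the renamed single-request
aclgraph test from item 6 could look like. The test name, tensor-parallel
size, and model path are assumptions; the body follows the common VllmRunner
pattern visible in the test_qwen3_next.py diff below, not the actual file
contents.

```python
# Hypothetical sketch only: the test name, TP size, and model path are
# assumptions; the real body lives in
# tests/e2e/multicard/test_single_request_aclgraph.py.
from modelscope import snapshot_download  # type: ignore

from tests.e2e.conftest import VllmRunner


def test_single_request_aclgraph_tp2():
    example_prompts = ["Hello, my name is"]
    max_tokens = 5
    # Default (aclgraph) mode: no enforce_eager argument, mirroring the
    # removal of the eager parameter in the diff below.
    with VllmRunner(
            snapshot_download("Qwen/Qwen3-0.6B"),
            max_model_len=4096,
            tensor_parallel_size=2,
            gpu_memory_utilization=0.8,
            distributed_executor_backend="mp") as vllm_model:
        vllm_model.generate_greedy(example_prompts, max_tokens)
    del vllm_model
```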

- vLLM version: v0.12.0
- vLLM main: ad32e3e19c

Signed-off-by: hfadzxy <starmoon_zhang@163.com>
Author: zhangxinyuehfad
Date: 2025-12-12 08:42:08 +08:00
Committed by: GitHub
Parent: a6ef3ac4e4
Commit: bfafe30953
8 changed files with 30 additions and 66 deletions

tests/e2e/multicard/test_qwen3_next.py

@@ -29,7 +29,7 @@ from modelscope import snapshot_download  # type: ignore
 from tests.e2e.conftest import VllmRunner
 
 
-def test_models_distributed_Qwen3_NEXT_TP4():
+def test_qwen3_next_distributed_mp_tp4():
     example_prompts = [
         "Hello, my name is",
     ] * 4
@@ -38,13 +38,12 @@ def test_models_distributed_Qwen3_NEXT_TP4():
             tensor_parallel_size=4,
             max_model_len=4096,
             gpu_memory_utilization=0.8,
-            distributed_executor_backend="mp",
-            enforce_eager=True) as vllm_model:
+            distributed_executor_backend="mp") as vllm_model:
         vllm_model.generate_greedy(example_prompts, max_tokens)
     del vllm_model
 
 
-def test_models_distributed_Qwen3_NEXT_TP4_FULL_DECODE_ONLY():
+def test_qwen3_next_distributed_mp_full_decode_only_tp4():
     example_prompts = [
         "Hello, my name is",
     ] * 4
@@ -54,7 +53,6 @@ def test_models_distributed_Qwen3_NEXT_TP4_FULL_DECODE_ONLY():
             max_model_len=4096,
             gpu_memory_utilization=0.8,
             distributed_executor_backend="mp",
-            enforce_eager=False,
             compilation_config={
                 "cudagraph_mode": "FULL_DECODE_ONLY",
                 "cudagraph_capture_sizes": [1, 8, 24, 48, 60]
@@ -64,7 +62,7 @@ def test_models_distributed_Qwen3_NEXT_TP4_FULL_DECODE_ONLY():
 
 
 # TODO: Fix the accuracy of batch chunked prefill
-def test_models_distributed_Qwen3_NEXT_MTP_TP4_SIMILARITY():
+def test_qwen3_next_distributed_mp_eager_mtp_similarity_tp4():
     example_prompts = ["Hello, my name is"]
     max_tokens = 20
@@ -110,16 +108,15 @@ def test_models_distributed_Qwen3_NEXT_MTP_TP4_SIMILARITY():
 
 # TODO: will conduct accuracy verification after the subsequent version becomes stable
 @patch.dict(os.environ, {"HCCL_BUFFSIZE": "1024"})
-def test_models_distributed_Qwen3_NEXT_W8A8DYNAMIC_WITH_EP():
+def test_qwen3_next_w8a8dynamic_distributed_tp4_ep():
     example_prompts = [
         "Hello, my name is",
     ]
     max_tokens = 5
     with VllmRunner(
-            snapshot_download(
-                "vllm-ascend/Qwen3-Next-80B-A3B-Instruct-W8A8-Pruning"),
+            snapshot_download("vllm-ascend/Qwen3-Next-80B-A3B-Instruct-W8A8"),
             max_model_len=4096,
-            tensor_parallel_size=2,
+            tensor_parallel_size=4,
             gpu_memory_utilization=0.4,
             max_num_seqs=1,
             enable_expert_parallel=True,
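
For reference, a minimal sketch of running the refactored file locally through
pytest's Python entry point; the file path comes from the diff above, and
invoking this from the repository root is an assumption.

```python
# Hypothetical local runner: equivalent to `pytest -q <file>` from the repo
# root; pytest.main returns an exit code suitable for SystemExit.
import pytest

if __name__ == "__main__":
    raise SystemExit(
        pytest.main(["-q", "tests/e2e/multicard/test_qwen3_next.py"]))
```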