[E2E] Optimize the E2E test time. (#5294)

### What this PR does / why we need it?
Add cudagraph_capture_sizes to the E2E CI tests so that cudagraph capture is limited to a few small batch sizes, reducing overall test time.
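
Limiting the capture list to a few small batch sizes keeps cudagraphs enabled for the tiny batches these greedy-decode CI tests actually produce, while skipping capture passes for the many larger sizes that would otherwise be warmed up. Below is a minimal sketch of the same idea against the plain vLLM API, assuming the runner's `cudagraph_capture_sizes` keyword maps onto `CompilationConfig.cudagraph_capture_sizes`; the model, parallel settings, and prompt are placeholders taken from the tests, not a prescribed setup.

```python
from vllm import LLM, SamplingParams

# Sketch only: assumes the test runner forwards cudagraph_capture_sizes to
# vLLM's CompilationConfig; model, parallel setup, and prompt are placeholders.
llm = LLM(
    model="Qwen/Qwen3-30B-A3B",
    tensor_parallel_size=2,
    # Capture graphs only for the small batch sizes the CI prompts actually
    # reach, instead of the much longer default capture list, which shortens
    # the graph-capture/warmup phase of each test.
    compilation_config={"cudagraph_capture_sizes": [1, 2, 4, 8]},
)
outputs = llm.generate(
    ["Hello, my name is"],
    SamplingParams(temperature=0.0, max_tokens=5),  # greedy decode, as in the tests
)
```

The sizes [1, 2, 4, 8] cover the batch shapes reached when generating over a handful of example prompts, so the cudagraph path is still exercised while capture time drops.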

- vLLM version: release/v0.13.0
- vLLM main: ad32e3e19c

Signed-off-by: menogrey <1299267905@qq.com>
Author: zhangyiming
Date: 2025-12-26 14:17:50 +08:00 (committed by GitHub)
Commit: 45c5bcd962 (parent 29d2fe653d)
22 changed files with 57 additions and 5 deletions


@@ -43,6 +43,7 @@ def test_qwen3_moe_distributed_mp_tp2_ep():
"Qwen/Qwen3-30B-A3B",
tensor_parallel_size=2,
enable_expert_parallel=True,
cudagraph_capture_sizes=[1, 2, 4, 8],
distributed_executor_backend="mp",
) as vllm_model:
vllm_model.generate_greedy(example_prompts, max_tokens)
@@ -57,6 +58,7 @@ def test_qwen3_moe_w8a8_distributed_tp2():
snapshot_download("vllm-ascend/Qwen3-30B-A3B-W8A8"),
max_model_len=8192,
tensor_parallel_size=2,
cudagraph_capture_sizes=[1, 2, 4, 8],
quantization="ascend",
) as vllm_model:
vllm_model.generate_greedy(example_prompts, max_tokens)
@@ -73,6 +75,7 @@ def test_qwen3_moe_distributed_aiv_tp2():
"Qwen/Qwen3-30B-A3B",
dtype=dtype,
tensor_parallel_size=2,
cudagraph_capture_sizes=[1, 2, 4, 8],
) as vllm_model:
vllm_model.generate_greedy(example_prompts, max_tokens)