[CI] Add skipped testcases. (#5254)

### What this PR does / why we need it?
Some E2E test cases are not in our CI workflow; this PR adds them back.

- vLLM version: release/v0.13.0
- vLLM main:
ad32e3e19c

Signed-off-by: menogrey <1299267905@qq.com>
This commit is contained in:
zhangyiming
2025-12-24 10:41:32 +08:00
committed by GitHub
parent 7ff1db4b84
commit bd4fb871c6
2 changed files with 8 additions and 0 deletions

View File

@@ -48,9 +48,11 @@ def test_models_with_multistream_overlap_shared_expert(
model,
max_model_len=1024,
enforce_eager=True,
cudagraph_capture_sizes=[4, 8, 16, 32],
additional_config={
"multistream_overlap_shared_expert": True,
},
quantization="ascend",
) as runner:
vllm_moe_ms_eager_outputs = runner.model.generate(
prompts, sampling_params)
@@ -58,9 +60,11 @@ def test_models_with_multistream_overlap_shared_expert(
with VllmRunner(
model,
max_model_len=1024,
cudagraph_capture_sizes=[4, 8, 16, 32],
additional_config={
"multistream_overlap_shared_expert": True,
},
quantization="ascend",
) as runner:
vllm_moe_ms_aclgraph_outputs = runner.model.generate(
prompts, sampling_params)
@@ -69,6 +73,8 @@ def test_models_with_multistream_overlap_shared_expert(
model,
max_model_len=1024,
enforce_eager=True,
cudagraph_capture_sizes=[4, 8, 16, 32],
quantization="ascend",
) as runner:
vllm_eager_outputs = runner.model.generate(prompts, sampling_params)