[E2E] Optimize the E2E test time. (#5294)

### What this PR does / why we need it?
Pass explicit `cudagraph_capture_sizes` to the E2E CI tests so each test captures only a small set of graph sizes, reducing graph-capture time and overall CI time.
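Most tests pass the sizes directly as a keyword argument to the test runner; the full-graph tests set them through `compilation_config` instead (see the hunks below). A minimal sketch of the keyword form, assuming the `VllmRunner` helper used by the E2E suite and the `Qwen/Qwen3-0.6B` model that appears in these tests (the import path here is illustrative, not exact):

```python
from vllm import SamplingParams

# Illustrative import path; the helper lives in the E2E test suite's conftest.
from tests.e2e.conftest import VllmRunner

prompts = ["Hello, my name is"]
sampling_params = SamplingParams(temperature=0.0, max_tokens=16)

# Capture graphs only for batch sizes 1, 2, 4 and 8 instead of the much larger
# default list, trading large-batch graph coverage for faster engine start-up.
with VllmRunner("Qwen/Qwen3-0.6B",
                max_model_len=1024,
                cudagraph_capture_sizes=[1, 2, 4, 8]) as runner:
    outputs = runner.model.generate(prompts, sampling_params)
```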

- vLLM version: release/v0.13.0
- vLLM main: ad32e3e19c

Signed-off-by: menogrey <1299267905@qq.com>
Authored by zhangyiming on 2025-12-26 14:17:50 +08:00, committed by GitHub
parent 29d2fe653d, commit 45c5bcd962
22 changed files with 57 additions and 5 deletions

View File

@@ -122,6 +122,7 @@ def test_models_pcp_dcp_piece_wise():
decode_context_parallel_size=2,
max_num_batched_tokens=1024,
enable_expert_parallel=True,
cudagraph_capture_sizes=[1, 2, 4, 8],
block_size=128) as runner:
runner.model.generate(prompts, sampling_params)
@@ -132,6 +133,7 @@ def test_models_pcp_dcp_piece_wise():
prefill_context_parallel_size=2,
decode_context_parallel_size=1,
enable_expert_parallel=True,
cudagraph_capture_sizes=[1, 2, 4, 8],
block_size=128,
quantization="ascend") as runner:
runner.model.generate(prompts, sampling_params)

View File

@@ -15,11 +15,14 @@ def test_deepseek_correctness_ep(model_name):
max_tokens = 5
# FIXME: Really strange that chunked prefill might lead to different results, investigate further
- with VllmRunner(model_name, tensor_parallel_size=2) as vllm_model:
+ with VllmRunner(model_name,
+                 cudagraph_capture_sizes=[1, 2, 4, 8],
+                 tensor_parallel_size=2) as vllm_model:
tp_output = vllm_model.generate_greedy(example_prompts, max_tokens)
with VllmRunner(model_name,
tensor_parallel_size=2,
cudagraph_capture_sizes=[1, 2, 4, 8],
enable_expert_parallel=True) as vllm_model:
ep_output = vllm_model.generate_greedy(example_prompts, max_tokens)

View File

@@ -51,6 +51,7 @@ def test_qwen3_moe_full_decode_only_tp2():
with VllmRunner(
model,
max_model_len=1024,
cudagraph_capture_sizes=[4, 8, 24, 48, 60],
tensor_parallel_size=2,
) as runner:
vllm_eager_outputs = runner.model.generate(prompts, sampling_params)
@@ -95,6 +96,7 @@ def test_qwen3_moe_full_graph_tp2():
with VllmRunner(
model,
max_model_len=1024,
cudagraph_capture_sizes=[4, 8, 24, 48, 60],
tensor_parallel_size=2,
) as runner:
vllm_eager_outputs = runner.model.generate(prompts, sampling_params)

View File

@@ -16,6 +16,7 @@ def test_ilama_lora_tp2(distributed_executor_backend, ilama_lora_files):
max_model_len=1024,
max_num_seqs=16,
tensor_parallel_size=2,
cudagraph_capture_sizes=[1, 2, 4, 8],
distributed_executor_backend=distributed_executor_backend,
) as vllm_model:
output = do_sample(vllm_model.model, ilama_lora_files, lora_id=2)

View File

@@ -60,6 +60,7 @@ def test_deepseek_multistream_moe_tp2():
"vllm-ascend/DeepSeek-V3-Pruning",
dtype=dtype,
tensor_parallel_size=2,
cudagraph_capture_sizes=[1, 2, 4, 8],
distributed_executor_backend="mp",
additional_config={
"enable_multistream_moe": True,
@@ -80,6 +81,7 @@ def test_qwen3_w4a8_dynamic_tp2(model):
max_model_len=8192,
dtype="auto",
tensor_parallel_size=2,
cudagraph_capture_sizes=[1, 2, 4, 8],
quantization="ascend",
) as vllm_model:
vllm_model.generate_greedy(prompts, max_tokens)
@@ -120,6 +122,7 @@ def test_deepseek_w4a8_accuracy_tp2(model):
with VllmRunner(snapshot_download(model),
dtype="auto",
tensor_parallel_size=2,
cudagraph_capture_sizes=[1, 2, 4, 8],
quantization="ascend",
enable_expert_parallel=True) as vllm_model:
vllm_quant_outputs = vllm_model.model.generate(prompts,
@@ -190,6 +193,7 @@ def test_qwen3_dense_fc1_tp2(model):
max_model_len=8192,
dtype="auto",
tensor_parallel_size=2,
cudagraph_capture_sizes=[1, 2, 4, 8],
quantization="ascend",
) as vllm_model:
vllm_model.generate_greedy(example_prompts, max_tokens)
@@ -208,6 +212,7 @@ def test_qwen3_dense_prefetch_mlp_weight_tp2(model):
max_model_len=8192,
dtype="auto",
tensor_parallel_size=2,
cudagraph_capture_sizes=[1, 2, 4, 8],
quantization="ascend",
) as vllm_model:
vllm_model.generate_greedy(example_prompts, max_tokens)

View File

@@ -42,6 +42,7 @@ def test_models_pp2(model: str, tp_size: int, pp_size: int,
with VllmRunner(model,
tensor_parallel_size=tp_size,
pipeline_parallel_size=pp_size,
cudagraph_capture_sizes=[1, 2, 4, 8],
distributed_executor_backend=distributed_executor_backend,
gpu_memory_utilization=0.7) as vllm_model:
vllm_model.generate_greedy(prompts, 64)

View File

@@ -64,6 +64,7 @@ def test_models_prefix_cache_tp2(model: str, max_tokens: int) -> None:
with VllmRunner(model,
max_model_len=2048,
tensor_parallel_size=2,
cudagraph_capture_sizes=[1, 2, 4, 8],
gpu_memory_utilization=0.7) as vllm_model:
prefix_cache_output = vllm_model.generate_greedy(
INPUT_PROMPTS, max_tokens)
@@ -72,6 +73,7 @@ def test_models_prefix_cache_tp2(model: str, max_tokens: int) -> None:
enable_prefix_caching=False,
max_model_len=2048,
tensor_parallel_size=2,
cudagraph_capture_sizes=[1, 2, 4, 8],
gpu_memory_utilization=0.7) as vllm_model:
vllm_output = vllm_model.generate_greedy(INPUT_PROMPTS, max_tokens)

View File

@@ -33,6 +33,7 @@ def test_qwen2_5_w8a8_external_quantized_tp2():
with VllmRunner(
snapshot_download("neuralmagic/Qwen2.5-3B-quantized.w8a8"),
tensor_parallel_size=2,
cudagraph_capture_sizes=[1, 2, 4, 8],
max_model_len=4096,
gpu_memory_utilization=0.8,
) as vllm_model:

View File

@@ -43,6 +43,7 @@ def test_qwen3_moe_distributed_mp_tp2_ep():
"Qwen/Qwen3-30B-A3B",
tensor_parallel_size=2,
enable_expert_parallel=True,
cudagraph_capture_sizes=[1, 2, 4, 8],
distributed_executor_backend="mp",
) as vllm_model:
vllm_model.generate_greedy(example_prompts, max_tokens)
@@ -57,6 +58,7 @@ def test_qwen3_moe_w8a8_distributed_tp2():
snapshot_download("vllm-ascend/Qwen3-30B-A3B-W8A8"),
max_model_len=8192,
tensor_parallel_size=2,
cudagraph_capture_sizes=[1, 2, 4, 8],
quantization="ascend",
) as vllm_model:
vllm_model.generate_greedy(example_prompts, max_tokens)
@@ -73,6 +75,7 @@ def test_qwen3_moe_distributed_aiv_tp2():
"Qwen/Qwen3-30B-A3B",
dtype=dtype,
tensor_parallel_size=2,
cudagraph_capture_sizes=[1, 2, 4, 8],
) as vllm_model:
vllm_model.generate_greedy(example_prompts, max_tokens)

View File

@@ -36,6 +36,7 @@ def test_qwen3_next_distributed_mp_tp4():
max_tokens = 5
with VllmRunner("Qwen/Qwen3-Next-80B-A3B-Instruct",
tensor_parallel_size=4,
cudagraph_capture_sizes=[1, 2, 4, 8],
max_model_len=4096,
gpu_memory_utilization=0.8,
distributed_executor_backend="mp") as vllm_model:
@@ -125,6 +126,7 @@ def test_qwen3_next_w8a8dynamic_distributed_tp4_ep():
gpu_memory_utilization=0.4,
max_num_seqs=1,
enable_expert_parallel=True,
cudagraph_capture_sizes=[1, 2, 4, 8],
quantization="ascend",
) as vllm_model:
vllm_model.generate_greedy(example_prompts, max_tokens)

View File

@@ -64,6 +64,7 @@ def test_bge_m3_correctness():
with VllmRunner(
model_name,
runner="pooling",
cudagraph_capture_sizes=[4],
) as vllm_aclgraph_runner:
vllm_aclgraph_outputs = vllm_aclgraph_runner.embed(queries)

View File

@@ -41,6 +41,7 @@ def mtp_correctness(sampling_config: SamplingParams,
tensor_parallel_size=1,
gpu_memory_utilization=0.7,
max_model_len=256,
cudagraph_capture_sizes=[12],
enforce_eager=enforce_eager) as ref_llm:
ref_outputs = ref_llm.generate(example_prompts, sampling_config)

View File

@@ -79,6 +79,7 @@ def test_ngram_correctness(
with VllmRunner(
model_name,
max_model_len=1024,
cudagraph_capture_sizes=[1, 2, 4, 8],
) as ref_llm:
ref_outputs = ref_llm.model.chat(test_prompts, sampling_config)
@@ -91,6 +92,7 @@ def test_ngram_correctness(
"num_speculative_tokens": 3,
},
max_model_len=1024,
cudagraph_capture_sizes=[1, 2, 4, 8],
) as runner:
spec_outputs = runner.model.chat(test_prompts, sampling_config)
matches = 0
@@ -193,7 +195,9 @@ def test_suffix_correctness(
Compare the outputs of a original LLM and a speculative LLM
should be the same when using ngram speculative decoding.
'''
- with VllmRunner(model_name, max_model_len=1024) as ref_llm:
+ with VllmRunner(model_name,
+                 max_model_len=1024,
+                 cudagraph_capture_sizes=[1, 2, 4, 8]) as ref_llm:
ref_outputs = ref_llm.model.chat(test_prompts, sampling_config)
with VllmRunner(model_name,
@@ -201,6 +205,7 @@ def test_suffix_correctness(
"method": "suffix",
"num_speculative_tokens": 8,
},
cudagraph_capture_sizes=[1, 2, 4, 8],
max_model_len=1024) as runner:
spec_outputs = runner.model.chat(test_prompts, sampling_config)
matches = 0
@@ -237,6 +242,7 @@ def test_suffix_acceptance(
"num_speculative_tokens": 10,
},
max_model_len=1024,
cudagraph_capture_sizes=[1, 2, 4, 8],
disable_log_stats=False) as runner:
for i in range(10):
runner.model.chat(test_prompts[i], sampling_config)
@@ -300,6 +306,7 @@ def test_eagle_logprobs(
"max_model_len": 128,
},
max_model_len=128,
cudagraph_capture_sizes=[1, 2, 4, 8],
) as runner:
spec_outputs = runner.model.chat([prompt], sampling_params)

View File

@@ -64,6 +64,7 @@ def test_models_output_between_eager_and_aclgraph(
with VllmRunner(
model,
max_model_len=1024,
cudagraph_capture_sizes=[1, 2, 4, 8],
quantization="ascend",
) as runner:
vllm_aclgraph_outputs = runner.model.generate(
@@ -72,6 +73,7 @@ def test_models_output_between_eager_and_aclgraph(
with VllmRunner(
model,
max_model_len=1024,
cudagraph_capture_sizes=[1, 2, 4, 8],
) as runner:
vllm_aclgraph_outputs = runner.model.generate(
prompts, sampling_params)
@@ -151,7 +153,10 @@ def test_models_output_between_eager_and_full_decode_only(
with VllmRunner(
model,
max_model_len=1024,
compilation_config={"cudagraph_mode": "FULL_DECODE_ONLY"},
compilation_config={
"cudagraph_capture_sizes": [4, 8, 32, 64],
"cudagraph_mode": "FULL_DECODE_ONLY"
},
quantization="ascend",
) as runner:
vllm_aclgraph_outputs = runner.model.generate(
@@ -245,7 +250,10 @@ def test_models_output_between_eager_and_fullgraph_npugraph_ex(
with VllmRunner(
model,
max_model_len=1024,
compilation_config={"cudagraph_mode": "FULL_DECODE_ONLY"},
compilation_config={
"cudagraph_capture_sizes": [4, 8, 32, 64],
"cudagraph_mode": "FULL_DECODE_ONLY"
},
additional_config={"enable_npugraph_ex": True},
quantization="ascend",
) as runner:

View File

@@ -37,7 +37,9 @@ def test_end_to_end():
prompt = "How are you?"
sampling_params = SamplingParams(temperature=0, max_tokens=10)
with VllmRunner("Qwen/Qwen3-0.6B", enable_sleep_mode=True) as runner:
with VllmRunner("Qwen/Qwen3-0.6B",
enable_sleep_mode=True,
cudagraph_capture_sizes=[1, 2, 4, 8]) as runner:
output = runner.model.generate(prompt, sampling_params)
# the benefit of `llm.sleep(level=2)` is mainly CPU memory usage,

View File

@@ -55,6 +55,7 @@ def test_mixed_prompt_embeds_and_text(model_name):
with VllmRunner(
model_name,
enable_prompt_embeds=True,
cudagraph_capture_sizes=[1, 2, 4, 8],
) as vllm_runner:
# Test prompt embeddings
embeds_output = vllm_runner.model.generate({

View File

@@ -89,6 +89,7 @@ def test_guided_json_completion(guided_decoding_backend: str,
max_tokens=500,
structured_outputs=StructuredOutputsParams(json=sample_json_schema))
runner_kwargs = {
"cudagraph_capture_sizes": [1, 2, 4, 8],
"seed": 0,
"structured_outputs_config": {
"backend": guided_decoding_backend
@@ -128,6 +129,7 @@ def test_guided_regex(guided_decoding_backend: str, sample_regex):
top_p=0.95,
structured_outputs=StructuredOutputsParams(regex=sample_regex))
runner_kwargs = {
"cudagraph_capture_sizes": [1, 2, 4, 8],
"seed": 0,
"structured_outputs_config": {
"backend": guided_decoding_backend

View File

@@ -51,6 +51,7 @@ def test_ilama_lora(ilama_lora_files):
dtype="half",
max_loras=4,
max_model_len=1024,
cudagraph_capture_sizes=[1, 2, 4, 8],
max_num_seqs=16,
) as vllm_model:

View File

@@ -36,6 +36,7 @@ def test_qwen3_w8a8_quant():
snapshot_download("vllm-ascend/Qwen3-0.6B-W8A8"),
max_model_len=8192,
gpu_memory_utilization=0.7,
cudagraph_capture_sizes=[1, 2, 4, 8],
quantization="ascend",
) as vllm_model:
vllm_quant_w8a8_outputs = vllm_model.generate_greedy(

View File

@@ -32,6 +32,7 @@ def test_qwen3_topk() -> None:
with VllmRunner("Qwen/Qwen3-0.6B",
max_model_len=8192,
cudagraph_capture_sizes=[1, 2, 4, 8],
gpu_memory_utilization=0.7) as runner:
runner.generate(example_prompts, sampling_params)
@@ -43,6 +44,7 @@ def test_qwen3_prompt_logprobs() -> None:
with VllmRunner("Qwen/Qwen3-0.6B",
max_model_len=8192,
cudagraph_capture_sizes=[1, 2, 4, 8],
gpu_memory_utilization=0.7) as runner:
runner.generate_greedy_logprobs(example_prompts,
max_tokens=5,
@@ -60,6 +62,7 @@ def test_qwen3_exponential_overlap() -> None:
with VllmRunner("Qwen/Qwen3-0.6B",
max_model_len=8192,
cudagraph_capture_sizes=[1, 2, 4, 8],
gpu_memory_utilization=0.7,
additional_config={
"enable_async_exponential": True,

View File

@@ -47,6 +47,7 @@ def test_multimodal_vl(vl_config):
with VllmRunner(vl_config["model"],
mm_processor_kwargs=vl_config["mm_processor_kwargs"],
max_model_len=8192,
cudagraph_capture_sizes=[1, 2, 4, 8],
limit_mm_per_prompt={"image": 1}) as vllm_model:
outputs = vllm_model.generate_greedy(
prompts=prompts,
@@ -89,6 +90,7 @@ def test_multimodal_audio():
max_num_seqs=5,
dtype="bfloat16",
limit_mm_per_prompt={"audio": 2},
cudagraph_capture_sizes=[1, 2, 4, 8],
gpu_memory_utilization=0.9) as runner:
outputs = runner.generate(inputs, sampling_params=sampling_params)

View File

@@ -32,5 +32,6 @@ def test_models_topk() -> None:
with VllmRunner("Qwen/Qwen3-0.6B",
max_model_len=4096,
cudagraph_capture_sizes=[1, 2, 4, 8],
gpu_memory_utilization=0.7) as runner:
runner.generate(example_prompts, sampling_params)