From 45c5bcd962d6357363b68831bf712c9efd6e58d0 Mon Sep 17 00:00:00 2001
From: zhangyiming <34808445+menogrey@users.noreply.github.com>
Date: Fri, 26 Dec 2025 14:17:50 +0800
Subject: [PATCH] [E2E] Optimize the E2E test time. (#5294)

### What this PR does / why we need it?
Add `cudagraph_capture_sizes` to the E2E CI tests, so that each test captures
graphs for only a few explicit batch sizes instead of the full default list.
This cuts the time spent capturing graphs at engine startup and reduces
overall E2E test time.

- vLLM version: release/v0.13.0
- vLLM main: https://github.com/vllm-project/vllm/commit/ad32e3e19ccf0526cb6744a5fed09a138a5fb2f9
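For context, a minimal sketch (not part of this change) of what the new
argument does: `cudagraph_capture_sizes` ends up in vLLM's
`CompilationConfig.cudagraph_capture_sizes`, so graphs are captured only for
the listed batch sizes and other batch sizes fall back to eager execution;
the `VllmRunner` test helper presumably routes the keyword into `vllm.LLM`
the same way. The model name below is illustrative:

```python
# Minimal usage sketch (illustrative, not part of this patch).
from vllm import LLM, SamplingParams

# Only batch sizes 1, 2, 4 and 8 get a graph captured at startup; any other
# batch size runs eagerly. Fewer capture sizes => less startup time in CI.
llm = LLM(
    model="Qwen/Qwen3-0.6B",  # illustrative model
    max_model_len=1024,
    compilation_config={"cudagraph_capture_sizes": [1, 2, 4, 8]},
)
outputs = llm.generate(["Hello"], SamplingParams(max_tokens=5))
print(outputs[0].outputs[0].text)
```

Most tests use the small list `[1, 2, 4, 8]`; the full-graph tests use larger
lists (e.g. `[4, 8, 24, 48, 60]`), presumably because they exercise capture
at bigger batch sizes.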
Signed-off-by: menogrey <1299267905@qq.com>
---
 tests/e2e/multicard/long_sequence/test_basic.py      |  2 ++
 tests/e2e/multicard/test_expert_parallel.py          |  5 ++++-
 tests/e2e/multicard/test_full_graph_mode.py          |  2 ++
 tests/e2e/multicard/test_ilama_lora_tp2.py           |  1 +
 .../multicard/test_offline_inference_distributed.py  |  5 +++++
 tests/e2e/multicard/test_pipeline_parallel.py        |  1 +
 tests/e2e/multicard/test_prefix_caching.py           |  2 ++
 tests/e2e/multicard/test_quantization.py             |  1 +
 tests/e2e/multicard/test_qwen3_moe.py                |  3 +++
 tests/e2e/multicard/test_qwen3_next.py               |  2 ++
 tests/e2e/singlecard/pooling/test_embedding.py       |  1 +
 .../spec_decode_v1/test_v1_mtp_correctness.py        |  1 +
 .../singlecard/spec_decode_v1/test_v1_spec_decode.py |  9 ++++++++-
 tests/e2e/singlecard/test_aclgraph_accuracy.py       | 12 ++++++++++--
 tests/e2e/singlecard/test_camem.py                   |  4 +++-
 .../singlecard/test_completion_with_prompt_embeds.py |  1 +
 tests/e2e/singlecard/test_guided_decoding.py         |  2 ++
 tests/e2e/singlecard/test_ilama_lora.py              |  1 +
 tests/e2e/singlecard/test_quantization.py            |  1 +
 tests/e2e/singlecard/test_sampler.py                 |  3 +++
 tests/e2e/singlecard/test_vlm.py                     |  2 ++
 tests/e2e/vllm_interface/singlecard/test_sampler.py  |  1 +
 22 files changed, 57 insertions(+), 5 deletions(-)

diff --git a/tests/e2e/multicard/long_sequence/test_basic.py b/tests/e2e/multicard/long_sequence/test_basic.py
index f0b319b3..dde37364 100644
--- a/tests/e2e/multicard/long_sequence/test_basic.py
+++ b/tests/e2e/multicard/long_sequence/test_basic.py
@@ -122,6 +122,7 @@ def test_models_pcp_dcp_piece_wise():
                     decode_context_parallel_size=2,
                     max_num_batched_tokens=1024,
                     enable_expert_parallel=True,
+                    cudagraph_capture_sizes=[1, 2, 4, 8],
                     block_size=128) as runner:
         runner.model.generate(prompts, sampling_params)
 
@@ -132,6 +133,7 @@ def test_models_pcp_dcp_piece_wise():
                     prefill_context_parallel_size=2,
                     decode_context_parallel_size=1,
                     enable_expert_parallel=True,
+                    cudagraph_capture_sizes=[1, 2, 4, 8],
                     block_size=128,
                     quantization="ascend") as runner:
         runner.model.generate(prompts, sampling_params)
diff --git a/tests/e2e/multicard/test_expert_parallel.py b/tests/e2e/multicard/test_expert_parallel.py
index 9149d12e..86a46d8d 100644
--- a/tests/e2e/multicard/test_expert_parallel.py
+++ b/tests/e2e/multicard/test_expert_parallel.py
@@ -15,11 +15,14 @@ def test_deepseek_correctness_ep(model_name):
     max_tokens = 5
 
     # FIXME: Really strange that chunked prefill might lead to different results, investigate further
-    with VllmRunner(model_name, tensor_parallel_size=2) as vllm_model:
+    with VllmRunner(model_name,
+                    cudagraph_capture_sizes=[1, 2, 4, 8],
+                    tensor_parallel_size=2) as vllm_model:
         tp_output = vllm_model.generate_greedy(example_prompts, max_tokens)
 
     with VllmRunner(model_name,
                     tensor_parallel_size=2,
+                    cudagraph_capture_sizes=[1, 2, 4, 8],
                     enable_expert_parallel=True) as vllm_model:
         ep_output = vllm_model.generate_greedy(example_prompts, max_tokens)
diff --git a/tests/e2e/multicard/test_full_graph_mode.py b/tests/e2e/multicard/test_full_graph_mode.py
index e47b4e18..54c0a5f9 100644
--- a/tests/e2e/multicard/test_full_graph_mode.py
+++ b/tests/e2e/multicard/test_full_graph_mode.py
@@ -51,6 +51,7 @@ def test_qwen3_moe_full_decode_only_tp2():
     with VllmRunner(
             model,
             max_model_len=1024,
+            cudagraph_capture_sizes=[4, 8, 24, 48, 60],
             tensor_parallel_size=2,
     ) as runner:
         vllm_eager_outputs = runner.model.generate(prompts, sampling_params)
@@ -95,6 +96,7 @@ def test_qwen3_moe_full_graph_tp2():
     with VllmRunner(
             model,
             max_model_len=1024,
+            cudagraph_capture_sizes=[4, 8, 24, 48, 60],
             tensor_parallel_size=2,
     ) as runner:
         vllm_eager_outputs = runner.model.generate(prompts, sampling_params)
diff --git a/tests/e2e/multicard/test_ilama_lora_tp2.py b/tests/e2e/multicard/test_ilama_lora_tp2.py
index 230aac16..f37978b7 100644
--- a/tests/e2e/multicard/test_ilama_lora_tp2.py
+++ b/tests/e2e/multicard/test_ilama_lora_tp2.py
@@ -16,6 +16,7 @@ def test_ilama_lora_tp2(distributed_executor_backend, ilama_lora_files):
             max_model_len=1024,
             max_num_seqs=16,
             tensor_parallel_size=2,
+            cudagraph_capture_sizes=[1, 2, 4, 8],
             distributed_executor_backend=distributed_executor_backend,
     ) as vllm_model:
         output = do_sample(vllm_model.model, ilama_lora_files, lora_id=2)
diff --git a/tests/e2e/multicard/test_offline_inference_distributed.py b/tests/e2e/multicard/test_offline_inference_distributed.py
index 34b5b3d4..acfaf416 100644
--- a/tests/e2e/multicard/test_offline_inference_distributed.py
+++ b/tests/e2e/multicard/test_offline_inference_distributed.py
@@ -60,6 +60,7 @@ def test_deepseek_multistream_moe_tp2():
             "vllm-ascend/DeepSeek-V3-Pruning",
             dtype=dtype,
             tensor_parallel_size=2,
+            cudagraph_capture_sizes=[1, 2, 4, 8],
             distributed_executor_backend="mp",
             additional_config={
                 "enable_multistream_moe": True,
@@ -80,6 +81,7 @@ def test_qwen3_w4a8_dynamic_tp2(model):
             max_model_len=8192,
             dtype="auto",
             tensor_parallel_size=2,
+            cudagraph_capture_sizes=[1, 2, 4, 8],
             quantization="ascend",
     ) as vllm_model:
         vllm_model.generate_greedy(prompts, max_tokens)
@@ -120,6 +122,7 @@ def test_deepseek_w4a8_accuracy_tp2(model):
     with VllmRunner(snapshot_download(model),
                     dtype="auto",
                     tensor_parallel_size=2,
+                    cudagraph_capture_sizes=[1, 2, 4, 8],
                     quantization="ascend",
                     enable_expert_parallel=True) as vllm_model:
         vllm_quant_outputs = vllm_model.model.generate(prompts,
@@ -190,6 +193,7 @@ def test_qwen3_dense_fc1_tp2(model):
             max_model_len=8192,
             dtype="auto",
             tensor_parallel_size=2,
+            cudagraph_capture_sizes=[1, 2, 4, 8],
             quantization="ascend",
     ) as vllm_model:
         vllm_model.generate_greedy(example_prompts, max_tokens)
@@ -208,6 +212,7 @@ def test_qwen3_dense_prefetch_mlp_weight_tp2(model):
             max_model_len=8192,
             dtype="auto",
             tensor_parallel_size=2,
+            cudagraph_capture_sizes=[1, 2, 4, 8],
             quantization="ascend",
     ) as vllm_model:
         vllm_model.generate_greedy(example_prompts, max_tokens)
diff --git a/tests/e2e/multicard/test_pipeline_parallel.py b/tests/e2e/multicard/test_pipeline_parallel.py
index 855724ea..c2dc2d90 100644
--- a/tests/e2e/multicard/test_pipeline_parallel.py
+++ b/tests/e2e/multicard/test_pipeline_parallel.py
@@ -42,6 +42,7 @@ def test_models_pp2(model: str, tp_size: int, pp_size: int,
     with VllmRunner(model,
                     tensor_parallel_size=tp_size,
                     pipeline_parallel_size=pp_size,
+                    cudagraph_capture_sizes=[1, 2, 4, 8],
                     distributed_executor_backend=distributed_executor_backend,
                     gpu_memory_utilization=0.7) as vllm_model:
         vllm_model.generate_greedy(prompts, 64)
diff --git a/tests/e2e/multicard/test_prefix_caching.py b/tests/e2e/multicard/test_prefix_caching.py
index 272efc2a..b96a75c0 100644
--- a/tests/e2e/multicard/test_prefix_caching.py
+++ b/tests/e2e/multicard/test_prefix_caching.py
@@ -64,6 +64,7 @@ def test_models_prefix_cache_tp2(model: str, max_tokens: int) -> None:
     with VllmRunner(model,
                     max_model_len=2048,
                     tensor_parallel_size=2,
+                    cudagraph_capture_sizes=[1, 2, 4, 8],
                     gpu_memory_utilization=0.7) as vllm_model:
         prefix_cache_output = vllm_model.generate_greedy(
             INPUT_PROMPTS, max_tokens)
@@ -72,6 +73,7 @@ def test_models_prefix_cache_tp2(model: str, max_tokens: int) -> None:
                     enable_prefix_caching=False,
                     max_model_len=2048,
                     tensor_parallel_size=2,
+                    cudagraph_capture_sizes=[1, 2, 4, 8],
                     gpu_memory_utilization=0.7) as vllm_model:
         vllm_output = vllm_model.generate_greedy(INPUT_PROMPTS, max_tokens)
 
diff --git a/tests/e2e/multicard/test_quantization.py b/tests/e2e/multicard/test_quantization.py
index c37bfa5a..38c6157f 100644
--- a/tests/e2e/multicard/test_quantization.py
+++ b/tests/e2e/multicard/test_quantization.py
@@ -33,6 +33,7 @@ def test_qwen2_5_w8a8_external_quantized_tp2():
     with VllmRunner(
             snapshot_download("neuralmagic/Qwen2.5-3B-quantized.w8a8"),
             tensor_parallel_size=2,
+            cudagraph_capture_sizes=[1, 2, 4, 8],
             max_model_len=4096,
             gpu_memory_utilization=0.8,
     ) as vllm_model:
diff --git a/tests/e2e/multicard/test_qwen3_moe.py b/tests/e2e/multicard/test_qwen3_moe.py
index aa209b70..f0f93d95 100644
--- a/tests/e2e/multicard/test_qwen3_moe.py
+++ b/tests/e2e/multicard/test_qwen3_moe.py
@@ -43,6 +43,7 @@ def test_qwen3_moe_distributed_mp_tp2_ep():
             "Qwen/Qwen3-30B-A3B",
             tensor_parallel_size=2,
             enable_expert_parallel=True,
+            cudagraph_capture_sizes=[1, 2, 4, 8],
             distributed_executor_backend="mp",
     ) as vllm_model:
         vllm_model.generate_greedy(example_prompts, max_tokens)
@@ -57,6 +58,7 @@ def test_qwen3_moe_w8a8_distributed_tp2():
             snapshot_download("vllm-ascend/Qwen3-30B-A3B-W8A8"),
             max_model_len=8192,
             tensor_parallel_size=2,
+            cudagraph_capture_sizes=[1, 2, 4, 8],
             quantization="ascend",
     ) as vllm_model:
         vllm_model.generate_greedy(example_prompts, max_tokens)
@@ -73,6 +75,7 @@ def test_qwen3_moe_distributed_aiv_tp2():
             "Qwen/Qwen3-30B-A3B",
             dtype=dtype,
             tensor_parallel_size=2,
+            cudagraph_capture_sizes=[1, 2, 4, 8],
     ) as vllm_model:
         vllm_model.generate_greedy(example_prompts, max_tokens)
 
diff --git a/tests/e2e/multicard/test_qwen3_next.py b/tests/e2e/multicard/test_qwen3_next.py
index 83387acc..ed21d147 100644
--- a/tests/e2e/multicard/test_qwen3_next.py
+++ b/tests/e2e/multicard/test_qwen3_next.py
@@ -36,6 +36,7 @@ def test_qwen3_next_distributed_mp_tp4():
     max_tokens = 5
     with VllmRunner("Qwen/Qwen3-Next-80B-A3B-Instruct",
                     tensor_parallel_size=4,
+                    cudagraph_capture_sizes=[1, 2, 4, 8],
                     max_model_len=4096,
                     gpu_memory_utilization=0.8,
                     distributed_executor_backend="mp") as vllm_model:
@@ -125,6 +126,7 @@ def test_qwen3_next_w8a8dynamic_distributed_tp4_ep():
             gpu_memory_utilization=0.4,
             max_num_seqs=1,
             enable_expert_parallel=True,
+            cudagraph_capture_sizes=[1, 2, 4, 8],
             quantization="ascend",
     ) as vllm_model:
         vllm_model.generate_greedy(example_prompts, max_tokens)
diff --git a/tests/e2e/singlecard/pooling/test_embedding.py b/tests/e2e/singlecard/pooling/test_embedding.py
index 8800fc7a..54eae677 100644
--- a/tests/e2e/singlecard/pooling/test_embedding.py
+++ b/tests/e2e/singlecard/pooling/test_embedding.py
@@ -64,6 +64,7 @@ def test_bge_m3_correctness():
     with VllmRunner(
             model_name,
             runner="pooling",
+            cudagraph_capture_sizes=[4],
     ) as vllm_aclgraph_runner:
         vllm_aclgraph_outputs = vllm_aclgraph_runner.embed(queries)
 
diff --git a/tests/e2e/singlecard/spec_decode_v1/test_v1_mtp_correctness.py b/tests/e2e/singlecard/spec_decode_v1/test_v1_mtp_correctness.py
index d8c8fb23..9369c4e2 100644
--- a/tests/e2e/singlecard/spec_decode_v1/test_v1_mtp_correctness.py
+++ b/tests/e2e/singlecard/spec_decode_v1/test_v1_mtp_correctness.py
@@ -41,6 +41,7 @@ def mtp_correctness(sampling_config: SamplingParams,
                     tensor_parallel_size=1,
                     gpu_memory_utilization=0.7,
                     max_model_len=256,
+                    cudagraph_capture_sizes=[12],
                     enforce_eager=enforce_eager) as ref_llm:
         ref_outputs = ref_llm.generate(example_prompts, sampling_config)
 
diff --git a/tests/e2e/singlecard/spec_decode_v1/test_v1_spec_decode.py b/tests/e2e/singlecard/spec_decode_v1/test_v1_spec_decode.py
index 1526997a..01e56371 100644
--- a/tests/e2e/singlecard/spec_decode_v1/test_v1_spec_decode.py
+++ b/tests/e2e/singlecard/spec_decode_v1/test_v1_spec_decode.py
@@ -79,6 +79,7 @@ def test_ngram_correctness(
     with VllmRunner(
             model_name,
             max_model_len=1024,
+            cudagraph_capture_sizes=[1, 2, 4, 8],
     ) as ref_llm:
         ref_outputs = ref_llm.model.chat(test_prompts, sampling_config)
 
@@ -91,6 +92,7 @@ def test_ngram_correctness(
                 "num_speculative_tokens": 3,
             },
             max_model_len=1024,
+            cudagraph_capture_sizes=[1, 2, 4, 8],
     ) as runner:
         spec_outputs = runner.model.chat(test_prompts, sampling_config)
     matches = 0
@@ -193,7 +195,9 @@ def test_suffix_correctness(
     Compare the outputs of a original LLM and a speculative LLM
     should be the same when using ngram speculative decoding.
     '''
-    with VllmRunner(model_name, max_model_len=1024) as ref_llm:
+    with VllmRunner(model_name,
+                    max_model_len=1024,
+                    cudagraph_capture_sizes=[1, 2, 4, 8]) as ref_llm:
         ref_outputs = ref_llm.model.chat(test_prompts, sampling_config)
 
     with VllmRunner(model_name,
@@ -201,6 +205,7 @@
                         "method": "suffix",
                         "num_speculative_tokens": 8,
                     },
+                    cudagraph_capture_sizes=[1, 2, 4, 8],
                     max_model_len=1024) as runner:
         spec_outputs = runner.model.chat(test_prompts, sampling_config)
     matches = 0
@@ -237,6 +242,7 @@ def test_suffix_acceptance(
                 "num_speculative_tokens": 10,
             },
             max_model_len=1024,
+            cudagraph_capture_sizes=[1, 2, 4, 8],
             disable_log_stats=False) as runner:
         for i in range(10):
             runner.model.chat(test_prompts[i], sampling_config)
@@ -300,6 +306,7 @@ def test_eagle_logprobs(
                 "max_model_len": 128,
             },
             max_model_len=128,
+            cudagraph_capture_sizes=[1, 2, 4, 8],
     ) as runner:
         spec_outputs = runner.model.chat([prompt], sampling_params)
 
diff --git a/tests/e2e/singlecard/test_aclgraph_accuracy.py b/tests/e2e/singlecard/test_aclgraph_accuracy.py
index 8863e726..8b7b98c0 100644
--- a/tests/e2e/singlecard/test_aclgraph_accuracy.py
+++ b/tests/e2e/singlecard/test_aclgraph_accuracy.py
@@ -64,6 +64,7 @@ def test_models_output_between_eager_and_aclgraph(
     with VllmRunner(
             model,
             max_model_len=1024,
+            cudagraph_capture_sizes=[1, 2, 4, 8],
             quantization="ascend",
     ) as runner:
         vllm_aclgraph_outputs = runner.model.generate(
@@ -72,6 +73,7 @@ def test_models_output_between_eager_and_aclgraph(
     with VllmRunner(
             model,
             max_model_len=1024,
+            cudagraph_capture_sizes=[1, 2, 4, 8],
     ) as runner:
         vllm_aclgraph_outputs = runner.model.generate(
             prompts, sampling_params)
@@ -151,7 +153,10 @@ def test_models_output_between_eager_and_full_decode_only(
     with VllmRunner(
             model,
             max_model_len=1024,
-            compilation_config={"cudagraph_mode": "FULL_DECODE_ONLY"},
+            compilation_config={
+                "cudagraph_capture_sizes": [4, 8, 32, 64],
+                "cudagraph_mode": "FULL_DECODE_ONLY"
+            },
             quantization="ascend",
     ) as runner:
         vllm_aclgraph_outputs = runner.model.generate(
@@ -245,7 +250,10 @@ def test_models_output_between_eager_and_fullgraph_npugraph_ex(
     with VllmRunner(
             model,
             max_model_len=1024,
-            compilation_config={"cudagraph_mode": "FULL_DECODE_ONLY"},
+            compilation_config={
+                "cudagraph_capture_sizes": [4, 8, 32, 64],
+                "cudagraph_mode": "FULL_DECODE_ONLY"
+            },
             additional_config={"enable_npugraph_ex": True},
             quantization="ascend",
     ) as runner:
diff --git a/tests/e2e/singlecard/test_camem.py b/tests/e2e/singlecard/test_camem.py
index f3fd6aa4..5bdf68b7 100644
--- a/tests/e2e/singlecard/test_camem.py
+++ b/tests/e2e/singlecard/test_camem.py
@@ -37,7 +37,9 @@ def test_end_to_end():
     prompt = "How are you?"
     sampling_params = SamplingParams(temperature=0, max_tokens=10)
 
-    with VllmRunner("Qwen/Qwen3-0.6B", enable_sleep_mode=True) as runner:
+    with VllmRunner("Qwen/Qwen3-0.6B",
+                    enable_sleep_mode=True,
+                    cudagraph_capture_sizes=[1, 2, 4, 8]) as runner:
         output = runner.model.generate(prompt, sampling_params)
 
     # the benefit of `llm.sleep(level=2)` is mainly CPU memory usage,
diff --git a/tests/e2e/singlecard/test_completion_with_prompt_embeds.py b/tests/e2e/singlecard/test_completion_with_prompt_embeds.py
index d5fff2f2..0e8ececa 100644
--- a/tests/e2e/singlecard/test_completion_with_prompt_embeds.py
+++ b/tests/e2e/singlecard/test_completion_with_prompt_embeds.py
@@ -55,6 +55,7 @@ def test_mixed_prompt_embeds_and_text(model_name):
     with VllmRunner(
             model_name,
             enable_prompt_embeds=True,
+            cudagraph_capture_sizes=[1, 2, 4, 8],
     ) as vllm_runner:
         # Test prompt embeddings
         embeds_output = vllm_runner.model.generate({
diff --git a/tests/e2e/singlecard/test_guided_decoding.py b/tests/e2e/singlecard/test_guided_decoding.py
index e0e63141..8b1d83a8 100644
--- a/tests/e2e/singlecard/test_guided_decoding.py
+++ b/tests/e2e/singlecard/test_guided_decoding.py
@@ -89,6 +89,7 @@ def test_guided_json_completion(guided_decoding_backend: str,
         max_tokens=500,
         structured_outputs=StructuredOutputsParams(json=sample_json_schema))
     runner_kwargs = {
+        "cudagraph_capture_sizes": [1, 2, 4, 8],
         "seed": 0,
         "structured_outputs_config": {
             "backend": guided_decoding_backend
@@ -128,6 +129,7 @@ def test_guided_regex(guided_decoding_backend: str, sample_regex):
         top_p=0.95,
         structured_outputs=StructuredOutputsParams(regex=sample_regex))
     runner_kwargs = {
+        "cudagraph_capture_sizes": [1, 2, 4, 8],
         "seed": 0,
         "structured_outputs_config": {
             "backend": guided_decoding_backend
diff --git a/tests/e2e/singlecard/test_ilama_lora.py b/tests/e2e/singlecard/test_ilama_lora.py
index 1b941a06..a1f9fd41 100644
--- a/tests/e2e/singlecard/test_ilama_lora.py
+++ b/tests/e2e/singlecard/test_ilama_lora.py
@@ -51,6 +51,7 @@ def test_ilama_lora(ilama_lora_files):
             dtype="half",
             max_loras=4,
             max_model_len=1024,
+            cudagraph_capture_sizes=[1, 2, 4, 8],
             max_num_seqs=16,
     ) as vllm_model:
 
diff --git a/tests/e2e/singlecard/test_quantization.py b/tests/e2e/singlecard/test_quantization.py
index 9a5c8fcc..c510f291 100644
--- a/tests/e2e/singlecard/test_quantization.py
+++ b/tests/e2e/singlecard/test_quantization.py
@@ -36,6 +36,7 @@ def test_qwen3_w8a8_quant():
             snapshot_download("vllm-ascend/Qwen3-0.6B-W8A8"),
             max_model_len=8192,
             gpu_memory_utilization=0.7,
+            cudagraph_capture_sizes=[1, 2, 4, 8],
             quantization="ascend",
     ) as vllm_model:
         vllm_quant_w8a8_outputs = vllm_model.generate_greedy(
diff --git a/tests/e2e/singlecard/test_sampler.py b/tests/e2e/singlecard/test_sampler.py
index 31c065cc..894977b8 100644
--- a/tests/e2e/singlecard/test_sampler.py
+++ b/tests/e2e/singlecard/test_sampler.py
VllmRunner("Qwen/Qwen3-0.6B", max_model_len=8192, + cudagraph_capture_sizes=[1, 2, 4, 8], gpu_memory_utilization=0.7) as runner: runner.generate(example_prompts, sampling_params) @@ -43,6 +44,7 @@ def test_qwen3_prompt_logprobs() -> None: with VllmRunner("Qwen/Qwen3-0.6B", max_model_len=8192, + cudagraph_capture_sizes=[1, 2, 4, 8], gpu_memory_utilization=0.7) as runner: runner.generate_greedy_logprobs(example_prompts, max_tokens=5, @@ -60,6 +62,7 @@ def test_qwen3_exponential_overlap() -> None: with VllmRunner("Qwen/Qwen3-0.6B", max_model_len=8192, + cudagraph_capture_sizes=[1, 2, 4, 8], gpu_memory_utilization=0.7, additional_config={ "enable_async_exponential": True, diff --git a/tests/e2e/singlecard/test_vlm.py b/tests/e2e/singlecard/test_vlm.py index d2ca42be..33e896ff 100644 --- a/tests/e2e/singlecard/test_vlm.py +++ b/tests/e2e/singlecard/test_vlm.py @@ -47,6 +47,7 @@ def test_multimodal_vl(vl_config): with VllmRunner(vl_config["model"], mm_processor_kwargs=vl_config["mm_processor_kwargs"], max_model_len=8192, + cudagraph_capture_sizes=[1, 2, 4, 8], limit_mm_per_prompt={"image": 1}) as vllm_model: outputs = vllm_model.generate_greedy( prompts=prompts, @@ -89,6 +90,7 @@ def test_multimodal_audio(): max_num_seqs=5, dtype="bfloat16", limit_mm_per_prompt={"audio": 2}, + cudagraph_capture_sizes=[1, 2, 4, 8], gpu_memory_utilization=0.9) as runner: outputs = runner.generate(inputs, sampling_params=sampling_params) diff --git a/tests/e2e/vllm_interface/singlecard/test_sampler.py b/tests/e2e/vllm_interface/singlecard/test_sampler.py index 662e76e3..be66612e 100644 --- a/tests/e2e/vllm_interface/singlecard/test_sampler.py +++ b/tests/e2e/vllm_interface/singlecard/test_sampler.py @@ -32,5 +32,6 @@ def test_models_topk() -> None: with VllmRunner("Qwen/Qwen3-0.6B", max_model_len=4096, + cudagraph_capture_sizes=[1, 2, 4, 8], gpu_memory_utilization=0.7) as runner: runner.generate(example_prompts, sampling_params)