[E2E] Optimize the E2E test time. (#5294)
### What this PR does / why we need it?
Add explicit `cudagraph_capture_sizes` to the E2E CI tests so graph capture only runs for the batch sizes each test actually uses, which shortens engine start-up and overall CI time; see the sketch below.
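For reviewers unfamiliar with the knob: restricting the capture sizes means the engine only captures graphs for those batch sizes at start-up instead of the full default sweep, so each test warms up faster. A minimal sketch of the idea using the standalone vLLM API (model name, sizes, and the `compilation_config` route are illustrative; the tests themselves pass `cudagraph_capture_sizes` through `VllmRunner`):

```python
# Illustrative sketch, not the exact CI code: limit cudagraph capture to a few
# decode batch sizes so graph capture during engine start-up is cheaper.
from vllm import LLM, SamplingParams

llm = LLM(
    model="Qwen/Qwen3-0.6B",      # example model, matching the tests in this PR
    max_model_len=1024,
    compilation_config={
        # Only capture graphs for these batch sizes instead of the default list.
        "cudagraph_capture_sizes": [1, 2, 4, 8],
    },
)

outputs = llm.generate(["How are you?"],
                       SamplingParams(temperature=0, max_tokens=10))
print(outputs[0].outputs[0].text)
```

The sizes are chosen to cover the batch sizes each test actually issues, e.g. `[4]` for the single-batch embedding test and `[1, 2, 4, 8]` for the generation tests.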
- vLLM version: release/v0.13.0
- vLLM main: ad32e3e19c
Signed-off-by: menogrey <1299267905@qq.com>
@@ -64,6 +64,7 @@ def test_bge_m3_correctness():
     with VllmRunner(
             model_name,
             runner="pooling",
+            cudagraph_capture_sizes=[4],
     ) as vllm_aclgraph_runner:
         vllm_aclgraph_outputs = vllm_aclgraph_runner.embed(queries)
 
@@ -41,6 +41,7 @@ def mtp_correctness(sampling_config: SamplingParams,
                     tensor_parallel_size=1,
                     gpu_memory_utilization=0.7,
                     max_model_len=256,
+                    cudagraph_capture_sizes=[12],
                     enforce_eager=enforce_eager) as ref_llm:
         ref_outputs = ref_llm.generate(example_prompts, sampling_config)
 
@@ -79,6 +79,7 @@ def test_ngram_correctness(
     with VllmRunner(
             model_name,
             max_model_len=1024,
+            cudagraph_capture_sizes=[1, 2, 4, 8],
     ) as ref_llm:
         ref_outputs = ref_llm.model.chat(test_prompts, sampling_config)
 
@@ -91,6 +92,7 @@ def test_ngram_correctness(
                 "num_speculative_tokens": 3,
             },
             max_model_len=1024,
+            cudagraph_capture_sizes=[1, 2, 4, 8],
     ) as runner:
         spec_outputs = runner.model.chat(test_prompts, sampling_config)
         matches = 0
@@ -193,7 +195,9 @@ def test_suffix_correctness(
     Compare the outputs of a original LLM and a speculative LLM
     should be the same when using ngram speculative decoding.
     '''
-    with VllmRunner(model_name, max_model_len=1024) as ref_llm:
+    with VllmRunner(model_name,
+                    max_model_len=1024,
+                    cudagraph_capture_sizes=[1, 2, 4, 8]) as ref_llm:
         ref_outputs = ref_llm.model.chat(test_prompts, sampling_config)
 
     with VllmRunner(model_name,
@@ -201,6 +205,7 @@ def test_suffix_correctness(
                         "method": "suffix",
                         "num_speculative_tokens": 8,
                     },
+                    cudagraph_capture_sizes=[1, 2, 4, 8],
                     max_model_len=1024) as runner:
         spec_outputs = runner.model.chat(test_prompts, sampling_config)
         matches = 0
@@ -237,6 +242,7 @@ def test_suffix_acceptance(
                         "num_speculative_tokens": 10,
                     },
                     max_model_len=1024,
+                    cudagraph_capture_sizes=[1, 2, 4, 8],
                     disable_log_stats=False) as runner:
         for i in range(10):
             runner.model.chat(test_prompts[i], sampling_config)
@@ -300,6 +306,7 @@ def test_eagle_logprobs(
                 "max_model_len": 128,
             },
             max_model_len=128,
+            cudagraph_capture_sizes=[1, 2, 4, 8],
     ) as runner:
         spec_outputs = runner.model.chat([prompt], sampling_params)
 
@@ -64,6 +64,7 @@ def test_models_output_between_eager_and_aclgraph(
     with VllmRunner(
             model,
             max_model_len=1024,
+            cudagraph_capture_sizes=[1, 2, 4, 8],
             quantization="ascend",
     ) as runner:
         vllm_aclgraph_outputs = runner.model.generate(
@@ -72,6 +73,7 @@ def test_models_output_between_eager_and_aclgraph(
     with VllmRunner(
             model,
             max_model_len=1024,
+            cudagraph_capture_sizes=[1, 2, 4, 8],
     ) as runner:
         vllm_aclgraph_outputs = runner.model.generate(
             prompts, sampling_params)
@@ -151,7 +153,10 @@ def test_models_output_between_eager_and_full_decode_only(
     with VllmRunner(
             model,
             max_model_len=1024,
-            compilation_config={"cudagraph_mode": "FULL_DECODE_ONLY"},
+            compilation_config={
+                "cudagraph_capture_sizes": [4, 8, 32, 64],
+                "cudagraph_mode": "FULL_DECODE_ONLY"
+            },
             quantization="ascend",
     ) as runner:
         vllm_aclgraph_outputs = runner.model.generate(
@@ -245,7 +250,10 @@ def test_models_output_between_eager_and_fullgraph_npugraph_ex(
     with VllmRunner(
             model,
             max_model_len=1024,
-            compilation_config={"cudagraph_mode": "FULL_DECODE_ONLY"},
+            compilation_config={
+                "cudagraph_capture_sizes": [4, 8, 32, 64],
+                "cudagraph_mode": "FULL_DECODE_ONLY"
+            },
             additional_config={"enable_npugraph_ex": True},
             quantization="ascend",
     ) as runner:
@@ -37,7 +37,9 @@ def test_end_to_end():
     prompt = "How are you?"
     sampling_params = SamplingParams(temperature=0, max_tokens=10)
 
-    with VllmRunner("Qwen/Qwen3-0.6B", enable_sleep_mode=True) as runner:
+    with VllmRunner("Qwen/Qwen3-0.6B",
+                    enable_sleep_mode=True,
+                    cudagraph_capture_sizes=[1, 2, 4, 8]) as runner:
 
         output = runner.model.generate(prompt, sampling_params)
         # the benefit of `llm.sleep(level=2)` is mainly CPU memory usage,
@@ -55,6 +55,7 @@ def test_mixed_prompt_embeds_and_text(model_name):
     with VllmRunner(
             model_name,
             enable_prompt_embeds=True,
+            cudagraph_capture_sizes=[1, 2, 4, 8],
     ) as vllm_runner:
         # Test prompt embeddings
         embeds_output = vllm_runner.model.generate({
@@ -89,6 +89,7 @@ def test_guided_json_completion(guided_decoding_backend: str,
         max_tokens=500,
         structured_outputs=StructuredOutputsParams(json=sample_json_schema))
     runner_kwargs = {
+        "cudagraph_capture_sizes": [1, 2, 4, 8],
         "seed": 0,
         "structured_outputs_config": {
             "backend": guided_decoding_backend
@@ -128,6 +129,7 @@ def test_guided_regex(guided_decoding_backend: str, sample_regex):
         top_p=0.95,
         structured_outputs=StructuredOutputsParams(regex=sample_regex))
     runner_kwargs = {
+        "cudagraph_capture_sizes": [1, 2, 4, 8],
        "seed": 0,
        "structured_outputs_config": {
            "backend": guided_decoding_backend
@@ -51,6 +51,7 @@ def test_ilama_lora(ilama_lora_files):
             dtype="half",
             max_loras=4,
             max_model_len=1024,
+            cudagraph_capture_sizes=[1, 2, 4, 8],
             max_num_seqs=16,
     ) as vllm_model:
 
@@ -36,6 +36,7 @@ def test_qwen3_w8a8_quant():
             snapshot_download("vllm-ascend/Qwen3-0.6B-W8A8"),
             max_model_len=8192,
             gpu_memory_utilization=0.7,
+            cudagraph_capture_sizes=[1, 2, 4, 8],
             quantization="ascend",
     ) as vllm_model:
         vllm_quant_w8a8_outputs = vllm_model.generate_greedy(
@@ -32,6 +32,7 @@ def test_qwen3_topk() -> None:
 
     with VllmRunner("Qwen/Qwen3-0.6B",
                     max_model_len=8192,
+                    cudagraph_capture_sizes=[1, 2, 4, 8],
                     gpu_memory_utilization=0.7) as runner:
         runner.generate(example_prompts, sampling_params)
@@ -43,6 +44,7 @@ def test_qwen3_prompt_logprobs() -> None:
 
     with VllmRunner("Qwen/Qwen3-0.6B",
                     max_model_len=8192,
+                    cudagraph_capture_sizes=[1, 2, 4, 8],
                     gpu_memory_utilization=0.7) as runner:
         runner.generate_greedy_logprobs(example_prompts,
                                         max_tokens=5,
@@ -60,6 +62,7 @@ def test_qwen3_exponential_overlap() -> None:
 
     with VllmRunner("Qwen/Qwen3-0.6B",
                     max_model_len=8192,
+                    cudagraph_capture_sizes=[1, 2, 4, 8],
                     gpu_memory_utilization=0.7,
                     additional_config={
                         "enable_async_exponential": True,
@@ -47,6 +47,7 @@ def test_multimodal_vl(vl_config):
     with VllmRunner(vl_config["model"],
                     mm_processor_kwargs=vl_config["mm_processor_kwargs"],
                     max_model_len=8192,
+                    cudagraph_capture_sizes=[1, 2, 4, 8],
                     limit_mm_per_prompt={"image": 1}) as vllm_model:
         outputs = vllm_model.generate_greedy(
             prompts=prompts,
@@ -89,6 +90,7 @@ def test_multimodal_audio():
                     max_num_seqs=5,
                     dtype="bfloat16",
                     limit_mm_per_prompt={"audio": 2},
+                    cudagraph_capture_sizes=[1, 2, 4, 8],
                     gpu_memory_utilization=0.9) as runner:
         outputs = runner.generate(inputs, sampling_params=sampling_params)