[CI] Add long and short prompt tests for DeepSeek-V3.2 (#6536)
### What this PR does / why we need it?
This version has no divisibility constraint between TP and MTP+1. However, each entry in `cudagraph_capture_sizes` must be a common multiple of TP and MTP+1, with a maximum of TP * (MTP+1). We therefore fixed `cudagraph_capture_sizes` accordingly.

We added a long-sequence test (64k input, 3k output) for the two-node mixed-deployment scenario. Because full performance benchmarking would take too long, we only verify functionality. The single-node scenario is skipped because VRAM limitations prevent launching the model with a max-model-len of 68,000. We also added an AIME2025 accuracy test to the dual-node DeepSeek-V3.2 nightly run.

### How was this patch tested?
Tested in the nightly environment.

- vLLM version: v0.15.0
- vLLM main: https://github.com/vllm-project/vllm/commit/v0.15.0

Signed-off-by: guozr <guozr1997@hotmail.com>
Co-authored-by: guozr <guozr1997@hotmail.com>
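As a reading aid (not part of the diff), here is a minimal sketch of the capture-size rule described above, assuming it means every captured size must be a common multiple of TP and MTP+1. The function name and the `max_size` cap are illustrative, and the stated TP * (MTP+1) maximum is not modeled:

```python
# Minimal sketch of the stated rule: capture sizes must be common
# multiples of tp and (mtp + 1). All names here are illustrative.
from math import lcm


def candidate_capture_sizes(tp: int, num_speculative_tokens: int,
                            max_size: int) -> list[int]:
    step = lcm(tp, num_speculative_tokens + 1)
    return list(range(step, max_size + 1, step))


# Matches the single-node test below: tp=2, 1 speculative token.
assert candidate_capture_sizes(2, 1, 12) == [2, 4, 6, 8, 10, 12]
```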
```diff
@@ -255,18 +255,18 @@ def test_deepseek3_2_w8a8_pruning_mtp_tp2_ep():
     long_example_prompts = [
         "Hello " * (163839 - 500) + "Hello"
     ]
     max_tokens = 500
     with VllmRunner("vllm-ascend/DeepSeek-V3.2-W8A8-Pruning",
                     tensor_parallel_size=2,
                     quantization="ascend",
                     enable_expert_parallel=True,
                     max_model_len=163840,
                     compilation_config={
-                        "cudagraph_capture_sizes": [3, 6, 9, 12],
+                        "cudagraph_capture_sizes": [2, 4, 6, 8, 10, 12],
                         "cudagraph_mode": "FULL_DECODE_ONLY"
                     },
                     speculative_config={
-                        "num_speculative_tokens": 2,
+                        "num_speculative_tokens": 1,
                         "method": "deepseek_mtp"
                     },
                     additional_config={
```
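The prompt length in the test above appears chosen so that the prompt plus the output budget exactly fill the context window, assuming each "Hello" maps to a single token; a quick check of the arithmetic:

```python
# Assuming "Hello" tokenizes to one token under the model's tokenizer:
prompt_tokens = (163839 - 500) + 1   # "Hello " * (163839 - 500) + "Hello"
max_tokens = 500
assert prompt_tokens + max_tokens == 163840  # == max_model_len
```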
```diff
@@ -11,9 +11,11 @@ env_common:
   OMP_PROC_BIND: false
   OMP_NUM_THREADS: 1
   PYTORCH_NPU_ALLOC_CONF: "expandable_segments:True"
+  VLLM_ASCEND_ENABLE_MLAPO: 1
   VLLM_ASCEND_ENABLE_FLASHCOMM1: 1
   ASCEND_A3_EBA_ENABLE: 1
 
+# TODO: need to identify why TP and mtp+1 divisibility rules break on dual-node case
 
 deployment:
   -
```
```diff
@@ -30,13 +32,13 @@ deployment:
       --seed 1024
       --enable-expert-parallel
       --max-num-seqs 16
-      --max-model-len 8192
+      --max-model-len 68000
       --max-num-batched-tokens 4096
       --no-enable-prefix-caching
       --gpu-memory-utilization 0.85
       --trust-remote-code
-      --speculative-config '{"num_speculative_tokens": 2, "method":"deepseek_mtp"}'
-      --compilation-config '{"cudagraph_capture_sizes": [3, 6, 9, 12, 15, 18, 21, 24, 27, 30, 33, 36, 39, 42, 45, 48], "cudagraph_mode": "FULL_DECODE_ONLY"}'
+      --speculative-config '{"num_speculative_tokens": 3, "method":"deepseek_mtp"}'
+      --compilation-config '{"cudagraph_capture_sizes": [8, 16, 24, 32, 40, 48], "cudagraph_mode": "FULL_DECODE_ONLY"}'
       --additional-config '{"layer_sharding": ["q_b_proj", "o_proj"]}'
       --tokenizer-mode deepseek_v32
       --reasoning-parser deepseek_v3
```
```diff
@@ -55,27 +57,51 @@ deployment:
       --seed 1024
       --enable-expert-parallel
       --max-num-seqs 16
-      --max-model-len 8192
+      --max-model-len 68000
       --max-num-batched-tokens 4096
       --no-enable-prefix-caching
       --gpu-memory-utilization 0.85
       --trust-remote-code
-      --speculative-config '{"num_speculative_tokens": 2, "method":"deepseek_mtp"}'
-      --compilation-config '{"cudagraph_capture_sizes": [3, 6, 9, 12, 15, 18, 21, 24, 27, 30, 33, 36, 39, 42, 45, 48], "cudagraph_mode": "FULL_DECODE_ONLY"}'
+      --speculative-config '{"num_speculative_tokens": 3, "method":"deepseek_mtp"}'
+      --compilation-config '{"cudagraph_capture_sizes": [8, 16, 24, 32, 40, 48], "cudagraph_mode": "FULL_DECODE_ONLY"}'
       --additional-config '{"layer_sharding": ["q_b_proj", "o_proj"]}'
       --tokenizer-mode deepseek_v32
       --reasoning-parser deepseek_v3
 benchmarks:
-  perf:
+  perf_short_warmup:
+    case_type: performance
+    dataset_path: vllm-ascend/GSM8K-in3500-bs2800
+    request_conf: vllm_api_stream_chat
+    dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
+    num_prompts: 1
+    max_out_len: 3000
+    batch_size: 512
+    request_rate: 11.2
+    baseline: 1253.8466
+    threshold: 0.97
+
+  perf_long_warmup:
+    case_type: performance
+    dataset_path: vllm-ascend/GSM8K-in64000-bs2800
+    request_conf: vllm_api_stream_chat
+    dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
+    num_prompts: 1
+    max_out_len: 3000
+    batch_size: 1
+    request_rate: 11.2
+    baseline: 1253.8466
+    threshold: 0.97
+
+  perf_short:
     case_type: performance
     dataset_path: vllm-ascend/GSM8K-in3500-bs2800
     request_conf: vllm_api_stream_chat
     dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
     num_prompts: 512
     max_out_len: 3000
-    batch_size: 512
+    batch_size: 1
     request_rate: 11.2
-    baseline: 1253.8466
+    baseline: 148 # after switch vllm to 0.15.0, the baseline reduced significantly, need to confirm if it's a regression or just a more strict measurement
     threshold: 0.97
 
   acc:
```
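Both dual-node server configs above move to `num_speculative_tokens: 3`, so MTP+1 = 4 and every new capture size must divide evenly by 4. The step of 8 would additionally be consistent with a per-node TP of 8, which is an assumption here since the TP degree is not visible in this hunk:

```python
# Hedged consistency check for the updated dual-node config above.
# tp=8 is assumed (not shown in this hunk); mtp+1 = 4 comes from the diff.
sizes = [8, 16, 24, 32, 40, 48]
num_speculative_tokens = 3
assert all(s % (num_speculative_tokens + 1) == 0 for s in sizes)
```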
```diff
@@ -87,3 +113,13 @@ benchmarks:
     batch_size: 64
     baseline: 95
     threshold: 5
+
+  acc_aime2025:
+    case_type: accuracy
+    dataset_path: vllm-ascend/aime2025
+    request_conf: vllm_api_general_chat
+    dataset_conf: aime2025/aime2025_gen_0_shot_chat_prompt
+    max_out_len: 80000
+    batch_size: 32
+    baseline: 40
+    threshold: 7
```
```diff
@@ -45,6 +45,17 @@ aisbench_cases = [{
    "batch_size": 8,
    "baseline": 95,
    "threshold": 5
+}, {
+    "case_type": "performance",
+    "dataset_path": "vllm-ascend/GSM8K-in3500-bs400",
+    "request_conf": "vllm_api_stream_chat",
+    "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_str_perf",
+    "num_prompts": 1,
+    "max_out_len": 1500,
+    "batch_size": 1,
+    "request_rate": 11.2,
+    "baseline": 134,
+    "threshold": 0.97
 }, {
     "case_type": "performance",
     "dataset_path": "vllm-ascend/GSM8K-in3500-bs400",
```
```diff
@@ -56,7 +67,8 @@ aisbench_cases = [{
     "request_rate": 11.2,
     "baseline": 134,
     "threshold": 0.97
-}]
+}
+]
 
 
 @pytest.mark.asyncio
```
```diff
@@ -81,10 +93,10 @@ async def test_models(model: str, tp_size: int, dp_size: int) -> None:
         str(dp_size), "--port",
         str(port), "--max-model-len", "8192", "--max-num-batched-tokens",
         "8192", "--max-num-seqs", "4", "--trust-remote-code", "--quantization",
-        "ascend", "--gpu-memory-utilization", "0.92", "--compilation-config",
-        '{"cudagraph_capture_sizes":[3, 6, 9, 12], "cudagraph_mode":"FULL_DECODE_ONLY"}',
+        "ascend", "--gpu-memory-utilization", "0.98", "--compilation-config",
+        '{"cudagraph_capture_sizes":[8, 16, 24, 32, 40, 48], "cudagraph_mode":"FULL_DECODE_ONLY"}',
         "--speculative-config",
-        '{"num_speculative_tokens": 2, "method":"deepseek_mtp"}',
+        '{"num_speculative_tokens": 3, "method":"deepseek_mtp"}',
         "--additional-config",
         '{"layer_sharding": ["q_b_proj", "o_proj"]}',
         "--reasoning-parser", "deepseek_v3", "--tokenizer_mode", "deepseek_v32"
```