[CI] Add long and short prompt tests for DeepSeek-V3.2 (#6536)
### What this PR does / why we need it?
This version has no divisibility constraint between tp and mtp+1. However, cudagraph_capture_sizes must be a common multiple of tp and mtp+1, with a maximum of tp * (mtp+1), so we fixed cudagraph_capture_sizes accordingly (see the sketch below).

We added a long-sequence test (64k input, 3k output) for the two-node mixed-deployment scenario. Because a full performance benchmark would take too long, we only verify functionality. The single-node scenario is skipped because VRAM limitations prevent launching the model with a max-model-len of 68,000. We also added an aime2025 accuracy test to the dual-node DeepSeek-V3.2 nightly run.

### How was this patch tested?
Tested in the nightly environment.

- vLLM version: v0.15.0
- vLLM main: https://github.com/vllm-project/vllm/commit/v0.15.0

Signed-off-by: guozr <guozr1997@hotmail.com>
Co-authored-by: guozr <guozr1997@hotmail.com>
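As a rough illustration of the rule above, here is a minimal sketch that enumerates capture sizes as common multiples of tp and mtp+1, capped at tp * (mtp+1). The tp and num_speculative_tokens values are hypothetical and are not meant to reproduce the exact lists in this diff:

```python
from math import lcm

def valid_capture_sizes(tp: int, num_speculative_tokens: int) -> list[int]:
    """Enumerate cudagraph capture sizes that are common multiples of
    tp and (mtp + 1), capped at tp * (mtp + 1), per the rule above."""
    mtp_plus_one = num_speculative_tokens + 1
    step = lcm(tp, mtp_plus_one)   # smallest common multiple of tp and mtp+1
    upper = tp * mtp_plus_one      # stated maximum
    return list(range(step, upper + 1, step))

# Hypothetical values, for illustration only:
print(valid_capture_sizes(8, 3))  # lcm(8, 4) = 8, cap 32 -> [8, 16, 24, 32]
```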
@@ -11,9 +11,11 @@ env_common:
   OMP_PROC_BIND: false
   OMP_NUM_THREADS: 1
   PYTORCH_NPU_ALLOC_CONF: "expandable_segments:True"
   VLLM_ASCEND_ENABLE_MLAPO: 1
   VLLM_ASCEND_ENABLE_FLASHCOMM1: 1
+  ASCEND_A3_EBA_ENABLE: 1

+# TODO: need to identify why TP and mtp+1 divisibility rules break on dual-node case

 deployment:
   -
@@ -30,13 +32,13 @@ deployment:
     --seed 1024
     --enable-expert-parallel
     --max-num-seqs 16
-    --max-model-len 8192
+    --max-model-len 68000
     --max-num-batched-tokens 4096
     --no-enable-prefix-caching
     --gpu-memory-utilization 0.85
     --trust-remote-code
-    --speculative-config '{"num_speculative_tokens": 2, "method":"deepseek_mtp"}'
-    --compilation-config '{"cudagraph_capture_sizes": [3, 6, 9, 12, 15, 18, 21, 24, 27, 30, 33, 36, 39, 42, 45, 48], "cudagraph_mode": "FULL_DECODE_ONLY"}'
+    --speculative-config '{"num_speculative_tokens": 3, "method":"deepseek_mtp"}'
+    --compilation-config '{"cudagraph_capture_sizes": [8, 16, 24, 32, 40, 48], "cudagraph_mode": "FULL_DECODE_ONLY"}'
     --additional-config '{"layer_sharding": ["q_b_proj", "o_proj"]}'
     --tokenizer-mode deepseek_v32
     --reasoning-parser deepseek_v3
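For context, the new --max-model-len 68000 is sized to the long-sequence test's token budget (64k input plus 3k output, per the description above). A small sanity check of that arithmetic:

```python
# Token budget behind --max-model-len 68000 (figures from the PR description).
max_input_tokens = 64_000   # "64k input"
max_output_tokens = 3_000   # "3k output"
max_model_len = 68_000      # value set in this diff

# Input plus generated output must fit inside the model's context window.
assert max_input_tokens + max_output_tokens <= max_model_len
```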
@@ -55,27 +57,51 @@ deployment:
     --seed 1024
     --enable-expert-parallel
     --max-num-seqs 16
-    --max-model-len 8192
+    --max-model-len 68000
     --max-num-batched-tokens 4096
     --no-enable-prefix-caching
     --gpu-memory-utilization 0.85
     --trust-remote-code
-    --speculative-config '{"num_speculative_tokens": 2, "method":"deepseek_mtp"}'
-    --compilation-config '{"cudagraph_capture_sizes": [3, 6, 9, 12, 15, 18, 21, 24, 27, 30, 33, 36, 39, 42, 45, 48], "cudagraph_mode": "FULL_DECODE_ONLY"}'
+    --speculative-config '{"num_speculative_tokens": 3, "method":"deepseek_mtp"}'
+    --compilation-config '{"cudagraph_capture_sizes": [8, 16, 24, 32, 40, 48], "cudagraph_mode": "FULL_DECODE_ONLY"}'
     --additional-config '{"layer_sharding": ["q_b_proj", "o_proj"]}'
     --tokenizer-mode deepseek_v32
     --reasoning-parser deepseek_v3
 benchmarks:
-  perf:
+  perf_short_warmup:
+    case_type: performance
+    dataset_path: vllm-ascend/GSM8K-in3500-bs2800
+    request_conf: vllm_api_stream_chat
+    dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
+    num_prompts: 1
+    max_out_len: 3000
+    batch_size: 512
+    request_rate: 11.2
+    baseline: 1253.8466
+    threshold: 0.97
+
+  perf_long_warmup:
+    case_type: performance
+    dataset_path: vllm-ascend/GSM8K-in64000-bs2800
+    request_conf: vllm_api_stream_chat
+    dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
+    num_prompts: 1
+    max_out_len: 3000
+    batch_size: 1
+    request_rate: 11.2
+    baseline: 1253.8466
+    threshold: 0.97
+
+  perf_short:
     case_type: performance
     dataset_path: vllm-ascend/GSM8K-in3500-bs2800
     request_conf: vllm_api_stream_chat
     dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
     num_prompts: 512
     max_out_len: 3000
-    batch_size: 512
+    batch_size: 1
     request_rate: 11.2
-    baseline: 1253.8466
+    baseline: 148 # after switching vLLM to 0.15.0, the baseline dropped significantly; need to confirm whether this is a regression or just a stricter measurement
     threshold: 0.97

   acc:
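Assuming the harness treats `threshold` as a multiplicative floor on `baseline` (an assumption about the nightly framework, not something this diff confirms), a performance case would pass roughly as follows:

```python
def perf_passes(measured: float, baseline: float, threshold: float) -> bool:
    # Assumed semantics: measured throughput must stay within
    # `threshold` (e.g. 0.97) of the recorded baseline.
    return measured >= baseline * threshold

# Example with the perf_short values from this config:
print(perf_passes(measured=150.0, baseline=148, threshold=0.97))  # True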
@@ -87,3 +113,13 @@ benchmarks:
     batch_size: 64
     baseline: 95
     threshold: 5
+
+  acc_aime2025:
+    case_type: accuracy
+    dataset_path: vllm-ascend/aime2025
+    request_conf: vllm_api_general_chat
+    dataset_conf: aime2025/aime2025_gen_0_shot_chat_prompt
+    max_out_len: 80000
+    batch_size: 32
+    baseline: 40
+    threshold: 7
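The accuracy cases read like absolute tolerances (e.g. baseline 40 with threshold 7 for acc_aime2025). A minimal sketch under that assumption, which the harness may or may not implement exactly this way:

```python
def acc_passes(score: float, baseline: float, threshold: float) -> bool:
    # Assumed semantics: accuracy must land within +/- `threshold`
    # percentage points of the recorded baseline.
    return abs(score - baseline) <= threshold

# Example with the acc_aime2025 values from this config:
print(acc_passes(score=44.0, baseline=40, threshold=7))  # True
```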