Files
xc-llm-ascend/tests/e2e/nightly/multi_node/config/DeepSeek-V3_2-W8A8-A3-dual-nodes.yaml
starmountain1997 bc1622338c [CI] Add long and short prompt tests for DeepSeek-V3.2 (#6536)
### What this PR does / why we need it?

This version has no divisibility constraint between tp and mtp+1. However, each entry in cudagraph_capture_sizes must be a common multiple of tp and mtp+1, with a maximum of tp * (mtp+1). We therefore pinned cudagraph_capture_sizes to a fixed list in this config.
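
As a quick sketch of that arithmetic (illustrative only; the function and parameter names below are not vLLM API), the common multiples of tp and mtp+1 are exactly the multiples of lcm(tp, mtp+1):

```python
from math import lcm

def candidate_capture_sizes(tp: int, mtp: int, max_size: int) -> list[int]:
    """Common multiples of tp and mtp + 1 up to max_size."""
    step = lcm(tp, mtp + 1)
    return list(range(step, max_size + 1, step))

# This config uses tp=8 and num_speculative_tokens=3 (mtp=3):
print(candidate_capture_sizes(8, 3, 48))  # [8, 16, 24, 32, 40, 48]
```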

We added a long-sequence test (64k input, 3k output) for the two-node mixed-deployment scenario. Because performance benchmarking at this sequence length takes too long, we only verify functionality. The single-node scenario is skipped because VRAM limitations prevent launching the model with a max-model-len of 68,000.
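
For intuition on that VRAM limit, a back-of-envelope worst-case KV-cache estimate (a sketch only: `bytes_per_token` is an assumed stand-in, not this model's measured per-token cache footprint; compare the result against the HBM left over for cache after weights at gpu-memory-utilization 0.85):

```python
def kv_cache_gib(max_model_len: int, max_num_seqs: int, bytes_per_token: int) -> float:
    """Worst-case KV-cache demand if every sequence reaches max_model_len."""
    return max_model_len * max_num_seqs * bytes_per_token / 2**30

# max_model_len and max_num_seqs are from this config; bytes_per_token is hypothetical.
print(f"{kv_cache_gib(68000, 16, 70_000):.1f} GiB")
```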

We also added an AIME2025 accuracy test to the dual-node DeepSeek-V3.2 nightly run.

### How was this patch tested?

Tested in the nightly environment.

- vLLM version: v0.15.0
- vLLM main: https://github.com/vllm-project/vllm/commit/v0.15.0

Signed-off-by: guozr <guozr1997@hotmail.com>
Co-authored-by: guozr <guozr1997@hotmail.com>
2026-02-26 10:58:50 +08:00

126 lines
3.8 KiB
YAML

test_name: "test DeepSeek-V3.2-W8A8 on A3"
model: "vllm-ascend/DeepSeek-V3.2-W8A8"
num_nodes: 2
npu_per_node: 16
env_common:
  HCCL_OP_EXPANSION_MODE: "AIV"
  VLLM_USE_MODELSCOPE: true        # pull model weights from ModelScope
  HCCL_BUFFSIZE: 1024              # HCCL communication buffer size, in MB
  SERVER_PORT: 8080
  OMP_PROC_BIND: false
  OMP_NUM_THREADS: 1
  PYTORCH_NPU_ALLOC_CONF: "expandable_segments:True"  # reduce NPU memory fragmentation
  VLLM_ASCEND_ENABLE_MLAPO: 1
  VLLM_ASCEND_ENABLE_FLASHCOMM1: 1
  ASCEND_A3_EBA_ENABLE: 1
# TODO: need to identify why the TP and mtp+1 divisibility rules break in the dual-node case
# Topology: data-parallel-size 4 x tensor-parallel-size 8 = 32 NPUs = 2 nodes x 16 NPUs.
deployment:
  # Node 0 (master): serves the API endpoint and hosts DP ranks 0-1.
  -
    server_cmd: >
      vllm serve vllm-ascend/DeepSeek-V3.2-W8A8
      --host 0.0.0.0
      --port $SERVER_PORT
      --data-parallel-size 4
      --data-parallel-size-local 2
      --data-parallel-address $LOCAL_IP
      --data-parallel-rpc-port 13399
      --tensor-parallel-size 8
      --quantization ascend
      --seed 1024
      --enable-expert-parallel
      --max-num-seqs 16
      --max-model-len 68000
      --max-num-batched-tokens 4096
      --no-enable-prefix-caching
      --gpu-memory-utilization 0.85
      --trust-remote-code
      --speculative-config '{"num_speculative_tokens": 3, "method":"deepseek_mtp"}'
      --compilation-config '{"cudagraph_capture_sizes": [8, 16, 24, 32, 40, 48], "cudagraph_mode": "FULL_DECODE_ONLY"}'
      --additional-config '{"layer_sharding": ["q_b_proj", "o_proj"]}'
      --tokenizer-mode deepseek_v32
      --reasoning-parser deepseek_v3
  # Node 1: headless worker hosting DP ranks 2-3; joins the master via $MASTER_IP.
  -
    server_cmd: >
      vllm serve vllm-ascend/DeepSeek-V3.2-W8A8
      --headless
      --data-parallel-size 4
      --data-parallel-rpc-port 13399
      --data-parallel-size-local 2
      --data-parallel-start-rank 2
      --data-parallel-address $MASTER_IP
      --tensor-parallel-size 8
      --quantization ascend
      --seed 1024
      --enable-expert-parallel
      --max-num-seqs 16
      --max-model-len 68000
      --max-num-batched-tokens 4096
      --no-enable-prefix-caching
      --gpu-memory-utilization 0.85
      --trust-remote-code
      --speculative-config '{"num_speculative_tokens": 3, "method":"deepseek_mtp"}'
      --compilation-config '{"cudagraph_capture_sizes": [8, 16, 24, 32, 40, 48], "cudagraph_mode": "FULL_DECODE_ONLY"}'
      --additional-config '{"layer_sharding": ["q_b_proj", "o_proj"]}'
      --tokenizer-mode deepseek_v32
      --reasoning-parser deepseek_v3
benchmarks:
  perf_short_warmup:
    case_type: performance
    dataset_path: vllm-ascend/GSM8K-in3500-bs2800
    request_conf: vllm_api_stream_chat
    dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
    num_prompts: 1
    max_out_len: 3000
    batch_size: 512
    request_rate: 11.2
    baseline: 1253.8466
    threshold: 0.97
  perf_long_warmup:
    case_type: performance
    dataset_path: vllm-ascend/GSM8K-in64000-bs2800
    request_conf: vllm_api_stream_chat
    dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
    num_prompts: 1
    max_out_len: 3000
    batch_size: 1
    request_rate: 11.2
    baseline: 1253.8466
    threshold: 0.97
  perf_short:
    case_type: performance
    dataset_path: vllm-ascend/GSM8K-in3500-bs2800
    request_conf: vllm_api_stream_chat
    dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
    num_prompts: 512
    max_out_len: 3000
    batch_size: 1
    request_rate: 11.2
    baseline: 148  # after switching vLLM to 0.15.0 the baseline dropped significantly; need to confirm whether this is a regression or just a stricter measurement
    threshold: 0.97
  acc:
    case_type: accuracy
    dataset_path: vllm-ascend/gsm8k-lite
    request_conf: vllm_api_general_chat
    dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_chat_prompt
    max_out_len: 4096
    batch_size: 64
    baseline: 95
    threshold: 5
  acc_aime2025:
    case_type: accuracy
    dataset_path: vllm-ascend/aime2025
    request_conf: vllm_api_general_chat
    dataset_conf: aime2025/aime2025_gen_0_shot_chat_prompt
    max_out_len: 80000
    batch_size: 32
    baseline: 40
    threshold: 7
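
# Note on pass criteria (one plausible reading; to be confirmed against the
# benchmark harness): for case_type: performance, threshold appears to be a
# ratio, i.e. a run passes when measured >= baseline * threshold; for
# case_type: accuracy, threshold appears to be an absolute tolerance in
# points, i.e. the score must stay within threshold of baseline.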