Files
xc-llm-ascend/tests/e2e/nightly/multi_node/config/models/DeepSeek-V3_2-Exp-bf16.yaml
zhangxinyuehfad b77b4f1abf [Test] Add nightly test for DeepSeek-V3.2-Exp (#3908)
### What this PR does / why we need it?
Add nightly test for DeepSeek-V3.2-Exp


### How was this patch tested?
test action:

https://github.com/vllm-project/vllm-ascend/actions/runs/19156153634/job/54757008557?pr=3908


- vLLM version: v0.11.0
- vLLM main:
83f478bb19

---------

Signed-off-by: hfadzxy <starmoon_zhang@163.com>
2025-11-11 10:29:57 +08:00

56 lines
1.7 KiB
YAML

# Nightly e2e test: DeepSeek-V3.2-Exp (bf16) served across 2 nodes with
# data parallelism (one DP rank per node, TP=16 within each node).
test_name: "test DeepSeek-V3.2-Exp-bf16 multi-dp"
model: "Yanguan/DeepSeek-V3.2-Exp-bf16"
num_nodes: 2
npu_per_node: 16
# Environment exported to every node before launching the server.
# Boolean-looking values are quoted so YAML keeps them as strings: these end
# up in the process environment (which is string-valued anyway), and an
# unquoted `true` would load as a bool and could serialize as "True".
# Both OpenMP and vLLM parse these case-insensitively — NOTE(review):
# confirm the test harness exports values verbatim rather than branching
# on YAML bools.
env_common:
  VLLM_USE_MODELSCOPE: "true"
  OMP_PROC_BIND: "false"
  OMP_NUM_THREADS: 100
  HCCL_BUFFSIZE: 1024
  SERVER_PORT: 8080
  VLLM_ASCEND_ENABLE_MLAPO: 0
deployment:
  # Node 0: data-parallel master (DP rank 0). Hosts the API frontend and the
  # DP coordinator at $LOCAL_IP. The commands use `>-` folded scalars, which
  # already join lines with spaces — the stray trailing `\` after the first
  # line of the original has been removed, since it would survive folding and
  # reach the shell as a literal escaped-space argument.
  - server_cmd: >-
      vllm serve Yanguan/DeepSeek-V3.2-Exp-bf16
      --host 0.0.0.0
      --port $SERVER_PORT
      --data-parallel-address $LOCAL_IP
      --data-parallel-size 2
      --data-parallel-size-local 1
      --data-parallel-rpc-port 13389
      --tensor-parallel-size 16
      --seed 1024
      --enable-expert-parallel
      --max-num-seqs 16
      --max-model-len 17450
      --max-num-batched-tokens 17450
      --trust-remote-code
      --no-enable-prefix-caching
      --gpu-memory-utilization 0.9
      --additional-config '{"ascend_scheduler_config":{"enabled":true},"torchair_graph_config":{"enabled":true,"graph_batch_sizes":[16]}}'
  # Node 1: headless DP worker (DP rank 1) joining the coordinator at
  # $MASTER_IP. NOTE(review): gpu-memory-utilization here is 0.92 vs 0.9 on
  # node 0 — presumably intentional headroom tuning; confirm before unifying.
  - server_cmd: >-
      vllm serve Yanguan/DeepSeek-V3.2-Exp-bf16
      --host 0.0.0.0
      --port $SERVER_PORT
      --headless
      --data-parallel-size 2
      --data-parallel-size-local 1
      --data-parallel-start-rank 1
      --data-parallel-address $MASTER_IP
      --data-parallel-rpc-port 13389
      --tensor-parallel-size 16
      --seed 1024
      --max-num-seqs 16
      --max-model-len 17450
      --max-num-batched-tokens 17450
      --enable-expert-parallel
      --trust-remote-code
      --no-enable-prefix-caching
      --gpu-memory-utilization 0.92
      --additional-config '{"ascend_scheduler_config":{"enabled":true},"torchair_graph_config":{"enabled":true,"graph_batch_sizes":[16]}}'
benchmarks: