### What this PR does / why we need it?
This patch mainly fixes the problem of not being able to determine the exit status of the pod's entrypoint script (a sketch of the pattern follows the list), along with a few other small optimizations:
1. Shorten the wait-for-server timeout
2. Fix a typo
3. Fix ais_bench failing to correctly access the proxy URL in a PD (prefill/decode) separation scenario
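
For context, here is a minimal, hypothetical sketch of the exit-status pattern: trap `EXIT`, persist the code where the runner can read it, and re-exit with the same status. The file path and function name are illustrative and not the actual entrypoint script in this repo.

```bash
#!/usr/bin/env bash
# Hypothetical illustration only -- not the entrypoint changed in this PR.
set -euo pipefail

EXIT_CODE_FILE="/tmp/entrypoint_exit_code"   # assumed path, for illustration

record_exit() {
    local code=$?
    # Persist the exit code so the CI runner can determine pass/fail,
    # then exit with the same status so the pod reports it as well.
    echo "${code}" > "${EXIT_CODE_FILE}"
    exit "${code}"
}
trap record_exit EXIT

# ... the actual test/serving steps would run here ...
```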
### Does this PR introduce _any_ user-facing change?
### How was this patch tested?
- vLLM version: v0.11.0
- vLLM main: 83f478bb19
---------
Signed-off-by: wangli <wangli858794774@gmail.com>
Test config (YAML, 65 lines, 2.1 KiB):
test_name: "test DeepSeek-R1-W8A8 torchair on A2"
model: "vllm-ascend/DeepSeek-R1-0528-W8A8"
num_nodes: 2
npu_per_node: 8
env_common:
  VLLM_USE_MODELSCOPE: true
  HCCL_BUFFSIZE: 1024
  SERVER_PORT: 8080
  OMP_PROC_BIND: false
  OMP_NUM_THREADS: 10

deployment:
  -
    server_cmd: >
      vllm serve vllm-ascend/DeepSeek-R1-0528-W8A8
      --host 0.0.0.0
      --port $SERVER_PORT
      --data-parallel-size 4
      --data-parallel-size-local 2
      --data-parallel-address $LOCAL_IP
      --data-parallel-rpc-port 13399
      --no-enable-prefix-caching
      --max-num-seqs 16
      --tensor-parallel-size 4
      --max-model-len 36864
      --max-num-batched-tokens 6000
      --enable-expert-parallel
      --trust-remote-code
      --quantization ascend
      --gpu-memory-utilization 0.9
      --speculative-config '{"num_speculative_tokens": 1, "method":"deepseek_mtp"}'
      --additional-config '{"ascend_scheduler_config":{"enabled":false},"torchair_graph_config":{"enabled":true,"enable_multistream_moe":true},"chunked_prefill_for_mla":true,"enable_weight_nz_layout":true}'
  -
    server_cmd: >
      vllm serve vllm-ascend/DeepSeek-R1-0528-W8A8
      --headless
      --data-parallel-size 4
      --data-parallel-rpc-port 13399
      --data-parallel-size-local 2
      --data-parallel-start-rank 2
      --data-parallel-address $MASTER_IP
      --no-enable-prefix-caching
      --max-num-seqs 16
      --tensor-parallel-size 4
      --max-model-len 36864
      --max-num-batched-tokens 6000
      --enable-expert-parallel
      --trust-remote-code
      --quantization ascend
      --gpu-memory-utilization 0.9
      --speculative-config '{"num_speculative_tokens": 1, "method":"deepseek_mtp"}'
      --additional-config '{"ascend_scheduler_config":{"enabled":false},"torchair_graph_config":{"enabled":true,"enable_multistream_moe":true},"chunked_prefill_for_mla":true,"enable_weight_nz_layout":true}'

benchmarks:
  acc:
    case_type: accuracy
    dataset_path: vllm-ascend/gsm8k
    request_conf: vllm_api_general_chat
    dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_chat_prompt
    max_out_len: 32768
    batch_size: 512
    baseline: 95
    threshold: 5
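
As a usage note, a hypothetical smoke request against the head node of the deployment above would look roughly like this; `$MASTER_IP` and port 8080 (`SERVER_PORT`) follow the config, and the prompt is purely illustrative:

```bash
# Hypothetical smoke test against the OpenAI-compatible endpoint exposed by
# `vllm serve`; MASTER_IP and SERVER_PORT (8080) are taken from the config above.
curl -s "http://${MASTER_IP}:8080/v1/chat/completions" \
  -H "Content-Type: application/json" \
  -d '{
        "model": "vllm-ascend/DeepSeek-R1-0528-W8A8",
        "messages": [{"role": "user", "content": "What is 2 + 2?"}],
        "max_tokens": 32
      }'
```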