From bea3d5bbb47954249d56fcf2e92f7a636a0ab134 Mon Sep 17 00:00:00 2001
From: xleoken
Date: Sat, 2 Aug 2025 16:52:12 +0800
Subject: [PATCH] [Bug] Fix run bug in run_dp_server.sh (#2139)

### What this PR does / why we need it?
The example previously served `Qwen2.5-0.5B-Instruct`, which cannot run with this configuration:
- the model's total number of attention heads (14) must be divisible by the tensor parallel size (4)
- the model does not support `--enable-expert-parallel`

The example now serves the MoE model `Qwen/Qwen1.5-MoE-A2.7B` (fetched via `VLLM_USE_MODELSCOPE=true`) and reduces `--data-parallel-size` from 4 to 2.

### Does this PR introduce _any_ user-facing change?
No.

### How was this patch tested?
Local test.

- vLLM version: v0.10.0
- vLLM main: https://github.com/vllm-project/vllm/commit/ad57f23f6a528ab01066998b41796a44340fd43d

Signed-off-by: xleoken
---
 examples/run_dp_server.sh | 47 +++++++++++++++++++--------------------
 1 file changed, 23 insertions(+), 24 deletions(-)

diff --git a/examples/run_dp_server.sh b/examples/run_dp_server.sh
index eb3cfbf..1866fb0 100644
--- a/examples/run_dp_server.sh
+++ b/examples/run_dp_server.sh
@@ -1,33 +1,32 @@
-rm -rf ./.torchair_cache/
-rm -rf ./dynamo_*
-rm -rf /root/ascend/log/debug/plog/*
 export HCCL_IF_IP=2.0.0.0
-export GLOO_SOCKET_IFNAME="enp189s0f0"
-export TP_SOCKET_IFNAME="enp189s0f0"
-export HCCL_SOCKET_IFNAME="enp189s0f0"
+export GLOO_SOCKET_IFNAME="eth0"
+export TP_SOCKET_IFNAME="eth0"
+export HCCL_SOCKET_IFNAME="eth0"
 
 export OMP_PROC_BIND=false
 export OMP_NUM_THREADS=100
 
 export VLLM_USE_V1=1
+export VLLM_USE_MODELSCOPE=true
+
 export ASCEND_LAUNCH_BLOCKING=0
 
-vllm serve /data/weights/Qwen2.5-0.5B-Instruct \
-  --host 0.0.0.0 \
-  --port 20002 \
-  --served-model-name Qwen \
-  --data-parallel-size 4 \
-  --data-parallel-size-local 4 \
-  --data-parallel-address 2.0.0.0 \
-  --data-parallel-rpc-port 13389 \
-  --tensor-parallel-size 4 \
-  --enable-expert-parallel \
-  --no-enable-prefix-caching \
-  --max-num-seqs 16 \
-  --max-model-len 4096 \
-  --max-num-batched-tokens 4096 \
-  --gpu-memory-utilization 0.9 \
-  --trust-remote-code \
-  --enforce-eager \
-  --additional-config '{"ascend_scheduler_config":{"enabled":true},"torchair_graph_config":{"enabled":false, "enable_multistream_moe":false, "use_cached_graph":false}}'
+vllm serve Qwen/Qwen1.5-MoE-A2.7B \
+  --host 0.0.0.0 \
+  --port 20002 \
+  --served-model-name Qwen \
+  --data-parallel-size 2 \
+  --data-parallel-size-local 2 \
+  --data-parallel-address 2.0.0.0 \
+  --data-parallel-rpc-port 13389 \
+  --tensor-parallel-size 4 \
+  --enable-expert-parallel \
+  --no-enable-prefix-caching \
+  --max-num-seqs 16 \
+  --max-model-len 4096 \
+  --max-num-batched-tokens 4096 \
+  --gpu-memory-utilization 0.9 \
+  --trust-remote-code \
+  --enforce-eager \
+  --additional-config '{"ascend_scheduler_config":{"enabled":true},"torchair_graph_config":{"enabled":false, "enable_multistream_moe":false, "use_cached_graph":false}}'
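
For context, the divisibility constraint behind the first bullet can be checked before launching the server. A minimal sketch, assuming `jq` is installed and the model weights (with their `config.json`) are already on disk; the path and TP value below are illustrative, not part of the patch:

```bash
#!/usr/bin/env bash
# Verify that the model's attention head count is divisible by the
# tensor parallel size before running vllm serve.
MODEL_DIR="/data/weights/Qwen2.5-0.5B-Instruct"   # hypothetical local path
TP_SIZE=4

HEADS=$(jq -r '.num_attention_heads' "${MODEL_DIR}/config.json")
if [ $(( HEADS % TP_SIZE )) -ne 0 ]; then
  # Qwen2.5-0.5B-Instruct has 14 heads, so TP=4 fails this check.
  echo "num_attention_heads=${HEADS} not divisible by tensor-parallel-size=${TP_SIZE}" >&2
  exit 1
fi
echo "OK: ${HEADS} heads / TP ${TP_SIZE}"
```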
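Once the updated script has launched, the server can be smoke-tested against vLLM's OpenAI-compatible completions endpoint. The host, port, and served model name below are the ones set in the script; the prompt is arbitrary:

```bash
# --served-model-name Qwen maps the model to the name "Qwen" in the API.
curl -s http://0.0.0.0:20002/v1/completions \
  -H "Content-Type: application/json" \
  -d '{"model": "Qwen", "prompt": "Hello", "max_tokens": 16}'
```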