aclgraph is now stable and fast, so let's drop torchair graph mode.
TODO: some logic added to adapt torchair should be cleaned up as well;
we'll do that in a follow-up PR.
- vLLM version: v0.12.0
- vLLM main:
ad32e3e19c
Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
Co-authored-by: Mengqing Cao <cmq0113@163.com>
Bash script (31 lines, 723 B):
#!/usr/bin/env bash
# Launch a vLLM OpenAI-compatible server for Qwen/Qwen1.5-MoE-A2.7B on
# Ascend NPUs, using data parallelism (2) x tensor parallelism (4) with
# expert parallelism enabled.
#
# NOTE(review): 2.0.0.0 looks like a placeholder address — replace
# HCCL_IF_IP and --data-parallel-address with this node's real IP.

set -euo pipefail

# Network interfaces used for distributed communication.
export HCCL_IF_IP=2.0.0.0               # IP bound by HCCL on this node
export GLOO_SOCKET_IFNAME="eth0"        # interface for Gloo collectives
export TP_SOCKET_IFNAME="eth0"          # interface for tensor-parallel traffic
export HCCL_SOCKET_IFNAME="eth0"        # interface for HCCL traffic

# OpenMP threading: do not pin threads; cap the thread pool at 10.
export OMP_PROC_BIND=false
export OMP_NUM_THREADS=10

# Download model weights from ModelScope instead of the HuggingFace Hub.
export VLLM_USE_MODELSCOPE=true

# Keep Ascend kernel launches asynchronous (set to 1 only for debugging).
export ASCEND_LAUNCH_BLOCKING=0

vllm serve Qwen/Qwen1.5-MoE-A2.7B \
  --host 0.0.0.0 \
  --port 20002 \
  --served-model-name Qwen \
  --data-parallel-size 2 \
  --data-parallel-size-local 2 \
  --data-parallel-address 2.0.0.0 \
  --data-parallel-rpc-port 13389 \
  --tensor-parallel-size 4 \
  --enable-expert-parallel \
  --no-enable-prefix-caching \
  --max-num-seqs 16 \
  --max-model-len 4096 \
  --max-num-batched-tokens 4096 \
  --gpu-memory-utilization 0.9 \
  --trust-remote-code \
  --enforce-eager