[Test] Add nightly test for DeepSeek-V3.2-Exp (#3908)

### What this PR does / why we need it?
Add nightly test for DeepSeek-V3.2-Exp


### How was this patch tested?
test action:

https://github.com/vllm-project/vllm-ascend/actions/runs/19156153634/job/54757008557?pr=3908


- vLLM version: v0.11.0
- vLLM main:
83f478bb19

---------

Signed-off-by: hfadzxy <starmoon_zhang@163.com>
This commit is contained in:
zhangxinyuehfad
2025-11-11 10:29:57 +08:00
committed by GitHub
parent e384755ce1
commit b77b4f1abf
5 changed files with 228 additions and 1 deletion

View File

@@ -33,6 +33,9 @@ on:
tests:
required: true
type: string
name:
required: false
type: string
# Bash shells do not use ~/.profile or ~/.bashrc so these shells need to be explicitly
# declared as "shell: bash -el {0}" on steps that need to be properly activated.
@@ -94,6 +97,19 @@ jobs:
pip install -r requirements-dev.txt
pip install -v -e .
- name: Install custom-ops (for DeepSeek-V3.2-Exp)
if: ${{ inputs.name == 'deepseek3_2-exp-w8a8' }}
shell: bash -l {0}
run: |
wget https://vllm-ascend.obs.cn-north-4.myhuaweicloud.com/vllm-ascend/a3/CANN-custom_ops-sfa-linux.aarch64.run
chmod +x ./CANN-custom_ops-sfa-linux.aarch64.run
./CANN-custom_ops-sfa-linux.aarch64.run --quiet
export ASCEND_CUSTOM_OPP_PATH=/usr/local/Ascend/ascend-toolkit/latest/opp/vendors/customize:${ASCEND_CUSTOM_OPP_PATH}
export LD_LIBRARY_PATH=/usr/local/Ascend/ascend-toolkit/latest/opp/vendors/customize/op_api/lib/:${LD_LIBRARY_PATH}
wget https://vllm-ascend.obs.cn-north-4.myhuaweicloud.com/vllm-ascend/a3/custom_ops-1.0-cp311-cp311-linux_aarch64.whl
pip install custom_ops-1.0-cp311-cp311-linux_aarch64.whl
. /usr/local/Ascend/ascend-toolkit/set_env.sh
- name: Checkout aisbench repo and Install aisbench
run: |
git clone https://gitee.com/aisbench/benchmark.git

View File

@@ -84,12 +84,16 @@ jobs:
- name: qwen3-235b-w8a8
os: linux-aarch64-a3-16
tests: tests/e2e/nightly/models/test_qwen3_235b_w8a8.py
- name: deepseek3_2-exp-w8a8
os: linux-aarch64-a3-16
tests: tests/e2e/nightly/models/test_deepseek_v3_2_exp_w8a8.py
uses: ./.github/workflows/_e2e_nightly_single_node.yaml
with:
vllm: v0.11.0
runner: ${{ matrix.test_config.os }}
image: swr.cn-southwest-2.myhuaweicloud.com/base_image/ascend-ci/cann:8.3.rc1-a3-ubuntu22.04-py3.11
tests: ${{ matrix.test_config.tests }}
name: ${{ matrix.test_config.name }}
multi-node-tests:
name: multi-node
@@ -115,6 +119,9 @@ jobs:
- name: multi-node-glm-2node
config_file_path: GLM-4_5.yaml
size: 2
- name: multi-node-dpsk3.2-exp-2node
config_file_path: DeepSeek-V3_2-Exp-bf16.yaml
size: 2
uses: ./.github/workflows/_e2e_nightly_multi_node.yaml
with:
soc_version: a3

View File

@@ -0,0 +1,120 @@
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#
from typing import Any
import openai
import pytest
from vllm.utils import get_open_port
from tests.e2e.conftest import RemoteOpenAIServer
from tools.aisbench import run_aisbench_cases
# Model under test: W8A8-quantized DeepSeek-V3.2-Exp from the vllm-ascend hub.
MODELS = ["vllm-ascend/DeepSeek-V3.2-Exp-W8A8"]

# Parallelism sweep for a single A3 16-NPU node: TP8 x DP2.
TENSOR_PARALLELS = [8]
DATA_PARALLELS = [2]

# Run once with full-decode-only graph capture enabled and once without.
FULL_GRAPH = [True, False]

# Single smoke prompt for the raw completions call.
prompts = ["San Francisco is a"]

# Keyword arguments forwarded verbatim to client.completions.create().
api_keyword_args = {"max_tokens": 10}

# aisbench suites executed against the live server: a gsm8k accuracy run
# (baseline 95, tolerance 5) and a streaming performance run compared
# against a relative baseline.
aisbench_cases = [
    {
        "case_type": "accuracy",
        "dataset_path": "vllm-ascend/gsm8k-lite",
        "request_conf": "vllm_api_general_chat",
        "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_chat_prompt",
        "max_out_len": 4096,
        "batch_size": 8,
        "baseline": 95,
        "threshold": 5,
    },
    {
        "case_type": "performance",
        "dataset_path": "vllm-ascend/GSM8K-in3500-bs400",
        "request_conf": "vllm_api_stream_chat",
        "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_str_perf",
        "num_prompts": 16,
        "max_out_len": 1500,
        "batch_size": 8,
        "request_rate": 0,
        "baseline": 1,
        "threshold": 0.97,
    },
]
@pytest.mark.asyncio
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("tp_size", TENSOR_PARALLELS)
@pytest.mark.parametrize("dp_size", DATA_PARALLELS)
@pytest.mark.parametrize("full_graph", FULL_GRAPH)
async def test_models(model: str, tp_size: int, dp_size: int,
                      full_graph: bool) -> None:
    """Launch an OpenAI-compatible vLLM server for DeepSeek-V3.2-Exp W8A8,
    smoke-test it with one completion request, then run the aisbench
    accuracy and performance suites against it.
    """
    server_port = get_open_port()
    # HCCL buffer sized up for expert-parallel all-to-all; MLAPO fused op off.
    extra_env = {"HCCL_BUFFSIZE": "1024", "VLLM_ASCEND_ENABLE_MLAPO": "0"}
    launch_args: list[str] = [
        "--no-enable-prefix-caching",
        "--enable-expert-parallel",
    ]
    launch_args += ["--tensor-parallel-size", str(tp_size)]
    launch_args += ["--data-parallel-size", str(dp_size)]
    launch_args += ["--port", str(server_port)]
    launch_args += ["--max-model-len", "16384"]
    launch_args += ["--max-num-batched-tokens", "16384"]
    launch_args += ["--block-size", "16"]
    launch_args += ["--trust-remote-code"]
    launch_args += ["--quantization", "ascend"]
    launch_args += ["--gpu-memory-utilization", "0.9"]
    launch_args += [
        "--additional-config",
        '{"ascend_scheduler_config":{"enabled":true},"torchair_graph_config":{"enabled":true,"graph_batch_sizes":[16]}}',
    ]
    if full_graph:
        # NOTE(review): "cudagraph_capture" / "cudagraph_model" do not match
        # upstream vLLM CompilationConfig field names ("cudagraph_capture_sizes"
        # / "cudagraph_mode") — confirm against the vllm/vllm-ascend version
        # pinned in CI before relying on this branch.
        launch_args += [
            "--compilation-config",
            '{"cudagraph_capture": [16], "cudagraph_model":"FULL_DECODE_ONLY"}'
        ]
    completion_kwargs: dict[str, Any] = dict(api_keyword_args)
    with RemoteOpenAIServer(model,
                            launch_args,
                            server_port=server_port,
                            env_dict=extra_env,
                            auto_port=False) as server:
        async_client = server.get_async_client()
        response = await async_client.completions.create(
            model=model,
            prompt=prompts,
            **completion_kwargs,
        )
        completion_choices: list[openai.types.CompletionChoice] = response.choices
        assert completion_choices[0].text, "empty response"
        # aisbench accuracy + performance cases against the running server.
        run_aisbench_cases(model, server_port, aisbench_cases)

View File

@@ -0,0 +1,55 @@
# Nightly 2-node (16 NPU per node) deployment of DeepSeek-V3.2-Exp bf16,
# data-parallel across nodes with TP16 inside each node.
test_name: "test DeepSeek-V3.2-Exp-bf16 multi-dp"
model: "Yanguan/DeepSeek-V3.2-Exp-bf16"
num_nodes: 2
npu_per_node: 16
# Environment exported on every node before launching the server.
env_common:
  VLLM_USE_MODELSCOPE: true
  OMP_PROC_BIND: false
  OMP_NUM_THREADS: 100
  HCCL_BUFFSIZE: 1024
  SERVER_PORT: 8080
  VLLM_ASCEND_ENABLE_MLAPO: 0
deployment:
  # Node 0: DP rank 0 / coordinator (binds --data-parallel-address).
  # NOTE(review): inside a folded (">") scalar the trailing "\" is kept as a
  # literal backslash in the joined command line — confirm the runner tolerates
  # it or drop it.
  -
    server_cmd: >
      vllm serve Yanguan/DeepSeek-V3.2-Exp-bf16 \
      --host 0.0.0.0
      --port $SERVER_PORT
      --data-parallel-address $LOCAL_IP
      --data-parallel-size 2
      --data-parallel-size-local 1
      --data-parallel-rpc-port 13389
      --tensor-parallel-size 16
      --seed 1024
      --enable-expert-parallel
      --max-num-seqs 16
      --max-model-len 17450
      --max-num-batched-tokens 17450
      --trust-remote-code
      --no-enable-prefix-caching
      --gpu-memory-utilization 0.9
      --additional-config '{"ascend_scheduler_config":{"enabled":true},"torchair_graph_config":{"enabled":true,"graph_batch_sizes":[16]}}'
  # Node 1: headless DP rank 1 joining the coordinator at $MASTER_IP.
  # NOTE(review): gpu-memory-utilization is 0.92 here vs 0.9 on node 0 —
  # confirm the asymmetry is intentional.
  -
    server_cmd: >
      vllm serve Yanguan/DeepSeek-V3.2-Exp-bf16 \
      --host 0.0.0.0
      --port $SERVER_PORT
      --headless
      --data-parallel-size 2
      --data-parallel-size-local 1
      --data-parallel-start-rank 1
      --data-parallel-address $MASTER_IP
      --data-parallel-rpc-port 13389
      --tensor-parallel-size 16
      --seed 1024
      --max-num-seqs 16
      --max-model-len 17450
      --max-num-batched-tokens 17450
      --enable-expert-parallel
      --trust-remote-code
      --no-enable-prefix-caching
      --gpu-memory-utilization 0.92
      --additional-config '{"ascend_scheduler_config":{"enabled":true},"torchair_graph_config":{"enabled":true,"graph_batch_sizes":[16]}}'
benchmarks:

View File

@@ -107,6 +107,32 @@ install_ais_bench() {
cd -
}
# Install the extra SFA custom-ops components required to serve
# DeepSeek-V3.2-Exp: a CANN custom-ops run package plus a python wheel,
# both fetched from the vllm-ascend OBS bucket. Returns 1 on download
# failure so the caller can abort.
install_extra_components() {
echo "====> Installing extra components for DeepSeek-v3.2-exp-bf16"
if ! wget -q https://vllm-ascend.obs.cn-north-4.myhuaweicloud.com/vllm-ascend/a3/CANN-custom_ops-sfa-linux.aarch64.run; then
echo "Failed to download CANN-custom_ops-sfa-linux.aarch64.run"
return 1
fi
chmod +x ./CANN-custom_ops-sfa-linux.aarch64.run
# --quiet: non-interactive install of the CANN custom-ops package.
./CANN-custom_ops-sfa-linux.aarch64.run --quiet
if ! wget -q https://vllm-ascend.obs.cn-north-4.myhuaweicloud.com/vllm-ascend/a3/custom_ops-1.0-cp311-cp311-linux_aarch64.whl; then
echo "Failed to download custom_ops wheel"
return 1
fi
pip install custom_ops-1.0-cp311-cp311-linux_aarch64.whl
# Point the op-path / library search paths at the freshly installed
# vendor ops, then refresh the toolkit environment.
export ASCEND_CUSTOM_OPP_PATH=/usr/local/Ascend/ascend-toolkit/latest/opp/vendors/customize:${ASCEND_CUSTOM_OPP_PATH}
export LD_LIBRARY_PATH=/usr/local/Ascend/ascend-toolkit/latest/opp/vendors/customize/op_api/lib/:${LD_LIBRARY_PATH}
source /usr/local/Ascend/ascend-toolkit/set_env.sh
# NOTE(review): this "EOF" has no visible matching here-doc opener ("<<EOF")
# in this view; either the opener was lost from this rendering or the line is
# stray and will execute as a nonexistent command — verify against the full
# script before changing it.
EOF
# Best-effort cleanup of the downloaded artifacts.
rm -f CANN-custom_ops-sfa-linux.aarch64.run \
custom_ops-1.0-cp311-cp311-linux_aarch64.whl
echo "====> Extra components installation completed"
}
kill_npu_processes() {
pgrep python3 | xargs -r kill -9
pgrep VLLM | xargs -r kill -9
@@ -140,6 +166,9 @@ main() {
checkout_src
install_sys_dependencies
install_vllm
if [[ "$CONFIG_YAML_PATH" == *"DeepSeek-V3_2-Exp-bf16.yaml" ]]; then
install_extra_components
fi
install_ais_bench
cd "$WORKSPACE/source_code"
. $SRC_DIR/vllm-ascend/tests/e2e/nightly/multi_node/scripts/build_mooncake.sh