[Test] Add e2e test and accuracy test for Qwen3-Next-80B-A3B-Instruct (#3450)
### What this PR does / why we need it?
Add e2e test and accuracy test for Qwen3-Next-80B-A3B-Instruct.

### How was this patch tested?
accuracy test: https://github.com/vllm-project/vllm-ascend/actions/runs/18771221544/job/53556027634?pr=3450
ci test: https://github.com/vllm-project/vllm-ascend/actions/runs/18771221530/job/53556027614?pr=3450

<img width="1703" height="562" alt="image" src="https://github.com/user-attachments/assets/973b6cfa-8240-41e3-893a-5024ff8d0693" />

- vLLM version: v0.11.0rc3
- vLLM main: https://github.com/vllm-project/vllm/commit/v0.11.0

Signed-off-by: hfadzxy <starmoon_zhang@163.com>
#### `.github/workflows/_accuracy_test.yaml` (10 lines changed)
```diff
@@ -73,6 +73,16 @@ jobs:
         working-directory: ./vllm-empty
         run: |
           VLLM_TARGET_DEVICE=empty pip install -e .
 
+      - name: Install Ascend toolkit & triton_ascend (for Qwen3-Next-80B-A3B-Instruct)
+        if: ${{ inputs.model_name == 'Qwen3-Next-80B-A3B-Instruct' }}
+        shell: bash -l {0}
+        run: |
+          wget -q https://vllm-ascend.obs.cn-north-4.myhuaweicloud.com/vllm-ascend/Ascend-BiSheng-toolkit_aarch64.run -O /tmp/Ascend-BiSheng-toolkit_aarch64.run
+          chmod a+x /tmp/Ascend-BiSheng-toolkit_aarch64.run
+          /tmp/Ascend-BiSheng-toolkit_aarch64.run --install
+          . /usr/local/Ascend/8.3.RC1/bisheng_toolkit/set_env.sh
+          python3 -m pip install "https://vllm-ascend.obs.cn-north-4.myhuaweicloud.com/vllm-ascend/triton_ascend-3.2.0.dev20250914-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl"
+
       - name: Resolve vllm-ascend version
         run: |
```
#### `.github/workflows/accuracy_test.yaml` (2 lines changed)
```diff
@@ -57,6 +57,8 @@ jobs:
             model_name: Qwen3-VL-30B-A3B-Instruct
           - runner: a2-2
             model_name: DeepSeek-V2-Lite
+          - runner: a2-4
+            model_name: Qwen3-Next-80B-A3B-Instruct
       fail-fast: false
     # test will be triggered when tag 'accuracy-test' & 'ready-for-test'
     if: >-
```
#### `.github/workflows/vllm_ascend_dist.yaml` (59 lines changed)
```diff
@@ -19,8 +19,36 @@ name: 'e2e test / a3-test'
 on:
   workflow_call:
 
+  push:
+    branches:
+      - 'main'
+      - '*-dev'
+    paths:
+      - '.github/workflows/vllm_ascend_dist.yml'
+      - 'tests/e2e/multicard/**'
+      - 'Dockerfile'
+      - 'vllm_ascend/**'
+      - 'setup.py'
+      - 'pyproject.toml'
+      - 'requirements.txt'
+      - 'cmake/**'
+      - 'CMakeLists.txt'
+      - 'csrc/**'
   pull_request:
     branches:
       - 'main'
       - '*-dev'
     paths:
       - '.github/workflows/vllm_ascend_dist.yml'
       - 'tests/e2e/multicard/**'
       - 'Dockerfile'
       - 'vllm_ascend/**'
       - 'setup.py'
       - 'pyproject.toml'
       - 'requirements.txt'
       - 'cmake/**'
       - 'CMakeLists.txt'
       - 'csrc/**'
     types: [ labeled ]
 
 # Bash shells do not use ~/.profile or ~/.bashrc so these shells need to be explicitly
```
```diff
@@ -39,10 +67,10 @@ concurrency:
 jobs:
   e2e:
     # only trigger e2e test after lint passed and the change is e2e related with pull request.
-    if: ${{ contains(github.event.pull_request.labels.*.name, 'dist-test') && contains(github.event.pull_request.labels.*.name, 'ready-for-test') || github.event_name == 'workflow_dispatch' }}
+    if: ${{ contains(github.event.pull_request.labels.*.name, 'dist-test') && contains(github.event.pull_request.labels.*.name, 'ready-for-test') || github.event_name == 'workflow_dispatch' || github.event_name == 'push'}}
     strategy:
       matrix:
-        os: [linux-aarch64-a3-8]
+        os: [linux-aarch64-a3-4]
         vllm_version: [v0.11.0]
     name: vLLM Ascend test
     runs-on: ${{ matrix.os }}
```
```diff
@@ -61,11 +89,13 @@ jobs:
           sed -i 's|ports.ubuntu.com|mirrors.tuna.tsinghua.edu.cn|g' /etc/apt/sources.list
           pip config set global.index-url https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
           apt-get update -y
-          apt install git -y
+          apt install git wget curl -y
           git config --global url."https://gh-proxy.test.osinfra.cn/https://github.com/".insteadOf https://github.com/
 
       - name: Checkout vllm-project/vllm-ascend repo
         uses: actions/checkout@v4
         with:
           path: ./vllm-ascend
 
       - name: Install system dependencies
         run: |
```
```diff
@@ -85,6 +115,7 @@ jobs:
           VLLM_TARGET_DEVICE=empty pip install -e .
 
       - name: Install vllm-project/vllm-ascend
         working-directory: ./vllm-ascend
         run: |
           export PIP_EXTRA_INDEX_URL=https://mirrors.huaweicloud.com/ascend/repos/pypi
           export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/x86_64-linux/devlib
```
```diff
@@ -92,6 +123,7 @@ jobs:
           pip install -v -e .
 
       - name: Run vllm-project/vllm-ascend test for V1 Engine
         working-directory: ./vllm-ascend
         env:
           VLLM_WORKER_MULTIPROC_METHOD: spawn
           VLLM_USE_MODELSCOPE: True
```
```diff
@@ -102,3 +134,22 @@ jobs:
           tests/e2e/multicard/test_offline_inference_distributed.py::test_models_distributed_DeepSeek_W4A8DYNAMIC \
           tests/e2e/multicard/test_qwen3_moe.py::test_models_distributed_Qwen3_MOE_TP2_WITH_EP \
           tests/e2e/multicard/test_qwen3_moe.py::test_models_distributed_Qwen3_MOE_W8A8_WITH_EP
+
+      - name: Install Ascend toolkit & triton_ascend (for Qwen3-Next-80B-A3B-Instruct)
+        shell: bash -l {0}
+        run: |
+          wget -q https://vllm-ascend.obs.cn-north-4.myhuaweicloud.com/vllm-ascend/Ascend-BiSheng-toolkit_aarch64.run -O /tmp/Ascend-BiSheng-toolkit_aarch64.run
+          chmod a+x /tmp/Ascend-BiSheng-toolkit_aarch64.run
+          /tmp/Ascend-BiSheng-toolkit_aarch64.run --install
+          . /usr/local/Ascend/8.3.RC1/bisheng_toolkit/set_env.sh
+          python3 -m pip install "https://vllm-ascend.obs.cn-north-4.myhuaweicloud.com/vllm-ascend/triton_ascend-3.2.0.dev20250914-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl"
+
+      - name: Run vllm-project/vllm-ascend Qwen3 Next test
+        working-directory: ./vllm-ascend
+        shell: bash -el {0}
+        env:
+          VLLM_WORKER_MULTIPROC_METHOD: spawn
+          VLLM_USE_MODELSCOPE: True
+        run: |
+          . /usr/local/Ascend/8.3.RC1/bisheng_toolkit/set_env.sh
+          pytest -sv tests/e2e/multicard/test_qwen3_next.py
```
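The two new steps above can also be replayed locally on a machine with four Ascend NPUs and vllm/vllm-ascend already installed; the sketch below simply mirrors the workflow commands (the URLs, paths, and environment variables are taken verbatim from the diff, everything else is assumed to be set up beforehand):

```bash
# Sketch of running the new Qwen3-Next e2e test locally, mirroring the workflow steps above.
# Assumes vllm and vllm-ascend are already installed and four Ascend NPUs are available.
wget -q https://vllm-ascend.obs.cn-north-4.myhuaweicloud.com/vllm-ascend/Ascend-BiSheng-toolkit_aarch64.run \
  -O /tmp/Ascend-BiSheng-toolkit_aarch64.run
chmod a+x /tmp/Ascend-BiSheng-toolkit_aarch64.run
/tmp/Ascend-BiSheng-toolkit_aarch64.run --install

# Load the BiSheng toolkit environment and install the prebuilt triton_ascend wheel.
. /usr/local/Ascend/8.3.RC1/bisheng_toolkit/set_env.sh
python3 -m pip install "https://vllm-ascend.obs.cn-north-4.myhuaweicloud.com/vllm-ascend/triton_ascend-3.2.0.dev20250914-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl"

# Run the new test with the same environment variables the workflow sets.
export VLLM_WORKER_MULTIPROC_METHOD=spawn
export VLLM_USE_MODELSCOPE=True
pytest -sv tests/e2e/multicard/test_qwen3_next.py
```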
#### `tests/e2e/models/configs/Qwen3-Next-80B-A3B-Instruct.yaml` (new file, 15 lines)
```yaml
model_name: "Qwen/Qwen3-Next-80B-A3B-Instruct"
hardware: "Atlas A2 Series"
model: "vllm"
tasks:
- name: "ceval-valid_accountant"
  metrics:
  - name: "acc,none"
    value: 0.98
max_model_len: 4096
tensor_parallel_size: 4
gpu_memory_utilization: 0.7
enable_expert_parallel: True
enforce_eager: True
batch_size: 1
num_fewshot: 5
```
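The YAML above only declares the expected score and the serving parameters; the harness that consumes it is not part of this diff. Assuming the accuracy workflow drives lm-evaluation-harness with its vLLM backend (as `model: "vllm"` suggests), the run would be roughly equivalent to an invocation like this, shown purely for illustration:

```bash
# Illustrative lm_eval invocation matching the fields of the config above.
# The actual driver script that reads this YAML lives outside this PR.
lm_eval --model vllm \
  --model_args "pretrained=Qwen/Qwen3-Next-80B-A3B-Instruct,tensor_parallel_size=4,max_model_len=4096,gpu_memory_utilization=0.7,enable_expert_parallel=True,enforce_eager=True" \
  --tasks ceval-valid_accountant \
  --num_fewshot 5 \
  --batch_size 1
# The reported "acc,none" metric is then compared against the 0.98 reference value.
```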
#### `tests/e2e/multicard/test_qwen3_next.py` (new file, 38 lines)
```python
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
# Adapted from vllm/tests/basic_correctness/test_basic_correctness.py
#
"""Compare the short outputs of HF and vLLM when using greedy sampling.

Run `pytest tests/e2e/multicard/test_qwen3_next.py`.
"""

from tests.e2e.conftest import VllmRunner


def test_models_distributed_Qwen3_NEXT_TP4():
    example_prompts = [
        "Hello, my name is",
    ]
    max_tokens = 5
    with VllmRunner("Qwen/Qwen3-Next-80B-A3B-Instruct",
                    tensor_parallel_size=4,
                    max_model_len=4096,
                    gpu_memory_utilization=0.7,
                    distributed_executor_backend="mp",
                    enforce_eager=True) as vllm_model:
        vllm_model.generate_greedy(example_prompts, max_tokens)
```
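The test itself is a smoke test rather than a correctness check: it loads Qwen/Qwen3-Next-80B-A3B-Instruct with tensor parallel size 4 under the multiprocessing executor and greedily generates five tokens for a single prompt, so it verifies that the model loads and runs end to end on four cards; output quality is covered separately by the accuracy workflow above.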