diff --git a/.github/workflows/_accuracy_test.yaml b/.github/workflows/_accuracy_test.yaml
index 4b4e199b..10bb351f 100644
--- a/.github/workflows/_accuracy_test.yaml
+++ b/.github/workflows/_accuracy_test.yaml
@@ -73,6 +73,16 @@ jobs:
         working-directory: ./vllm-empty
         run: |
           VLLM_TARGET_DEVICE=empty pip install -e .
+
+      - name: Install Ascend toolkit & triton_ascend (for Qwen3-Next-80B-A3B-Instruct)
+        if: ${{ inputs.model_name == 'Qwen3-Next-80B-A3B-Instruct' }}
+        shell: bash -l {0}
+        run: |
+          wget -q https://vllm-ascend.obs.cn-north-4.myhuaweicloud.com/vllm-ascend/Ascend-BiSheng-toolkit_aarch64.run -O /tmp/Ascend-BiSheng-toolkit_aarch64.run
+          chmod a+x /tmp/Ascend-BiSheng-toolkit_aarch64.run
+          /tmp/Ascend-BiSheng-toolkit_aarch64.run --install
+          . /usr/local/Ascend/8.3.RC1/bisheng_toolkit/set_env.sh
+          python3 -m pip install "https://vllm-ascend.obs.cn-north-4.myhuaweicloud.com/vllm-ascend/triton_ascend-3.2.0.dev20250914-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl"

       - name: Resolve vllm-ascend version
         run: |
diff --git a/.github/workflows/accuracy_test.yaml b/.github/workflows/accuracy_test.yaml
index 6d0faf83..a7d3c786 100644
--- a/.github/workflows/accuracy_test.yaml
+++ b/.github/workflows/accuracy_test.yaml
@@ -57,6 +57,8 @@ jobs:
             model_name: Qwen3-VL-30B-A3B-Instruct
           - runner: a2-2
             model_name: DeepSeek-V2-Lite
+          - runner: a2-4
+            model_name: Qwen3-Next-80B-A3B-Instruct
       fail-fast: false
     # test will be triggered when tag 'accuracy-test' & 'ready-for-test'
     if: >-
diff --git a/.github/workflows/vllm_ascend_dist.yaml b/.github/workflows/vllm_ascend_dist.yaml
index 1c0a8f0e..f78486e1 100644
--- a/.github/workflows/vllm_ascend_dist.yaml
+++ b/.github/workflows/vllm_ascend_dist.yaml
@@ -19,8 +19,36 @@ name: 'e2e test / a3-test'

 on:
   workflow_call:
-
+  push:
+    branches:
+      - 'main'
+      - '*-dev'
+    paths:
+      - '.github/workflows/vllm_ascend_dist.yaml'
+      - 'tests/e2e/multicard/**'
+      - 'Dockerfile'
+      - 'vllm_ascend/**'
+      - 'setup.py'
+      - 'pyproject.toml'
+      - 'requirements.txt'
+      - 'cmake/**'
+      - 'CMakeLists.txt'
+      - 'csrc/**'
   pull_request:
+    branches:
+      - 'main'
+      - '*-dev'
+    paths:
+      - '.github/workflows/vllm_ascend_dist.yaml'
+      - 'tests/e2e/multicard/**'
+      - 'Dockerfile'
+      - 'vllm_ascend/**'
+      - 'setup.py'
+      - 'pyproject.toml'
+      - 'requirements.txt'
+      - 'cmake/**'
+      - 'CMakeLists.txt'
+      - 'csrc/**'
     types: [ labeled ]

 # Bash shells do not use ~/.profile or ~/.bashrc so these shells need to be explicitly
@@ -39,10 +67,10 @@ concurrency:
 jobs:
   e2e:
     # only trigger e2e test after lint passed and the change is e2e related with pull request.
-    if: ${{ contains(github.event.pull_request.labels.*.name, 'dist-test') && contains(github.event.pull_request.labels.*.name, 'ready-for-test') || github.event_name == 'workflow_dispatch' }}
+    if: ${{ contains(github.event.pull_request.labels.*.name, 'dist-test') && contains(github.event.pull_request.labels.*.name, 'ready-for-test') || github.event_name == 'workflow_dispatch' || github.event_name == 'push' }}
     strategy:
       matrix:
-        os: [linux-aarch64-a3-8]
+        os: [linux-aarch64-a3-4]
         vllm_version: [v0.11.0]
     name: vLLM Ascend test
     runs-on: ${{ matrix.os }}
@@ -61,11 +89,13 @@ jobs:
           sed -i 's|ports.ubuntu.com|mirrors.tuna.tsinghua.edu.cn|g' /etc/apt/sources.list
           pip config set global.index-url https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
           apt-get update -y
-          apt install git -y
+          apt install git wget curl -y
           git config --global url."https://gh-proxy.test.osinfra.cn/https://github.com/".insteadOf https://github.com/

       - name: Checkout vllm-project/vllm-ascend repo
         uses: actions/checkout@v4
+        with:
+          path: ./vllm-ascend

       - name: Install system dependencies
         run: |
@@ -85,6 +115,7 @@ jobs:
           VLLM_TARGET_DEVICE=empty pip install -e .

       - name: Install vllm-project/vllm-ascend
+        working-directory: ./vllm-ascend
         run: |
           export PIP_EXTRA_INDEX_URL=https://mirrors.huaweicloud.com/ascend/repos/pypi
           export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/x86_64-linux/devlib
@@ -92,6 +123,7 @@ jobs:
           pip install -v -e .

       - name: Run vllm-project/vllm-ascend test for V1 Engine
+        working-directory: ./vllm-ascend
         env:
           VLLM_WORKER_MULTIPROC_METHOD: spawn
           VLLM_USE_MODELSCOPE: True
@@ -102,3 +134,22 @@ jobs:
             tests/e2e/multicard/test_offline_inference_distributed.py::test_models_distributed_DeepSeek_W4A8DYNAMIC \
             tests/e2e/multicard/test_qwen3_moe.py::test_models_distributed_Qwen3_MOE_TP2_WITH_EP \
             tests/e2e/multicard/test_qwen3_moe.py::test_models_distributed_Qwen3_MOE_W8A8_WITH_EP
+
+      - name: Install Ascend toolkit & triton_ascend (for Qwen3-Next-80B-A3B-Instruct)
+        shell: bash -l {0}
+        run: |
+          wget -q https://vllm-ascend.obs.cn-north-4.myhuaweicloud.com/vllm-ascend/Ascend-BiSheng-toolkit_aarch64.run -O /tmp/Ascend-BiSheng-toolkit_aarch64.run
+          chmod a+x /tmp/Ascend-BiSheng-toolkit_aarch64.run
+          /tmp/Ascend-BiSheng-toolkit_aarch64.run --install
+          . /usr/local/Ascend/8.3.RC1/bisheng_toolkit/set_env.sh
+          python3 -m pip install "https://vllm-ascend.obs.cn-north-4.myhuaweicloud.com/vllm-ascend/triton_ascend-3.2.0.dev20250914-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl"
+
+      - name: Run vllm-project/vllm-ascend Qwen3 Next test
+        working-directory: ./vllm-ascend
+        shell: bash -el {0}
+        env:
+          VLLM_WORKER_MULTIPROC_METHOD: spawn
+          VLLM_USE_MODELSCOPE: True
+        run: |
+          . /usr/local/Ascend/8.3.RC1/bisheng_toolkit/set_env.sh
+          pytest -sv tests/e2e/multicard/test_qwen3_next.py
\ No newline at end of file
diff --git a/tests/e2e/models/configs/Qwen3-Next-80B-A3B-Instruct.yaml b/tests/e2e/models/configs/Qwen3-Next-80B-A3B-Instruct.yaml
new file mode 100644
index 00000000..e92ed2f0
--- /dev/null
+++ b/tests/e2e/models/configs/Qwen3-Next-80B-A3B-Instruct.yaml
@@ -0,0 +1,15 @@
+model_name: "Qwen/Qwen3-Next-80B-A3B-Instruct"
+hardware: "Atlas A2 Series"
+model: "vllm"
+tasks:
+- name: "ceval-valid_accountant"
+  metrics:
+  - name: "acc,none"
+    value: 0.98
+max_model_len: 4096
+tensor_parallel_size: 4
+gpu_memory_utilization: 0.7
+enable_expert_parallel: True
+enforce_eager: True
+batch_size: 1
+num_fewshot: 5
diff --git a/tests/e2e/multicard/test_qwen3_next.py b/tests/e2e/multicard/test_qwen3_next.py
new file mode 100644
index 00000000..fe246d19
--- /dev/null
+++ b/tests/e2e/multicard/test_qwen3_next.py
@@ -0,0 +1,38 @@
+#
+# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
+# Copyright 2023 The vLLM team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# This file is a part of the vllm-ascend project.
+# Adapted from vllm/tests/basic_correctness/test_basic_correctness.py
+#
+"""Short greedy-sampling smoke test for Qwen3-Next-80B-A3B-Instruct with vLLM.
+
+Run `pytest tests/e2e/multicard/test_qwen3_next.py`.
+"""
+
+from tests.e2e.conftest import VllmRunner
+
+
+def test_models_distributed_Qwen3_NEXT_TP4():
+    example_prompts = [
+        "Hello, my name is",
+    ]
+    max_tokens = 5
+    with VllmRunner("Qwen/Qwen3-Next-80B-A3B-Instruct",
+                    tensor_parallel_size=4,
+                    max_model_len=4096,
+                    gpu_memory_utilization=0.7,
+                    distributed_executor_backend="mp",
+                    enforce_eager=True) as vllm_model:
+        vllm_model.generate_greedy(example_prompts, max_tokens)
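
For local debugging outside CI, the same greedy smoke check that test_qwen3_next.py performs can be sketched with vLLM's offline LLM API. This is only an illustration, not part of the change: it assumes a node with 4 Ascend NPUs where vllm and vllm-ascend are installed, the BiSheng toolkit environment has been sourced (. /usr/local/Ascend/8.3.RC1/bisheng_toolkit/set_env.sh), and the triton_ascend wheel from the steps above is present; the engine arguments simply mirror the new Qwen3-Next-80B-A3B-Instruct.yaml config and the e2e test.

# Local reproduction sketch (assumptions: 4 NPUs, BiSheng toolkit env sourced,
# triton_ascend installed). Mirrors the arguments used by test_qwen3_next.py.
import os

# CI pulls the weights from ModelScope and uses the spawn worker method;
# set both before importing vllm.
os.environ["VLLM_USE_MODELSCOPE"] = "True"
os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "spawn"

from vllm import LLM, SamplingParams

llm = LLM(
    model="Qwen/Qwen3-Next-80B-A3B-Instruct",
    tensor_parallel_size=4,             # tensor_parallel_size: 4
    max_model_len=4096,                 # max_model_len: 4096
    gpu_memory_utilization=0.7,         # gpu_memory_utilization: 0.7
    distributed_executor_backend="mp",  # same backend as the e2e test
    enforce_eager=True,                 # enforce_eager: True
)

# Greedy sampling (temperature=0) for a short deterministic output,
# analogous to VllmRunner.generate_greedy(example_prompts, max_tokens=5).
sampling_params = SamplingParams(temperature=0.0, max_tokens=5)
outputs = llm.generate(["Hello, my name is"], sampling_params)
print(outputs[0].outputs[0].text)

The accuracy workflow itself consumes the YAML config through the repo's lm-eval based harness rather than a script like this; the sketch only shows how the engine arguments fit together.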