Bump vLLM version to v0.11.2 What's broken and changed by vLLM: 1. structured_output is broken by https://github.com/vllm-project/vllm/pull/26866 2. get_mrope_input_positions is broken by https://github.com/vllm-project/vllm/pull/28399 3. graph mode is broken by https://github.com/vllm-project/vllm/pull/25110 we'll upgrade torch to 2.8 to fix the problem later 4. embedding is broken by https://github.com/vllm-project/vllm/pull/27583 5. `get_attn_backend_cls` and attention backend are broken by https://github.com/vllm-project/vllm/pull/28534 6. spec decode is broken by https://github.com/vllm-project/vllm/pull/28771 7. sp feature is broken by https://github.com/vllm-project/vllm/pull/27126 8. mtp is broken by https://github.com/vllm-project/vllm/pull/27922 9. lora is broken by https://github.com/vllm-project/vllm/pull/21068 10. execute_model is broken by https://github.com/vllm-project/vllm/pull/26866 11. `VLLM_DISABLE_SHARED_EXPERTS_STREAM` env is broken by https://github.com/vllm-project/vllm/pull/28159 12. kv cache is broken by https://github.com/vllm-project/vllm/pull/27753 13. dp is broken by https://github.com/vllm-project/vllm/pull/25110 What's broken and changed by ourselves: 1. qwen vl is broken by https://github.com/vllm-project/vllm/pull/28455 We'll remove model files in the future to avoid this kind of error 2. Engine core is broken by https://github.com/vllm-project/vllm/pull/23691 We'll remove the patch file in the future. 3. Ascend scheduler is broken by https://github.com/vllm-project/vllm/pull/28733 We'll remove ascend scheduler later. 4. qwen3-next is broken by https://github.com/vllm-project/vllm/pull/28083 We'll remove model files in the future to avoid this kind of error 5. qwen vl is broken by https://github.com/vllm-project/vllm/pull/27764. We'll remove model files in the future Known issues: 1. ray doesn't work 2. the accuracy of qwen3-next is not correct 3. qwen3-vl is broken 4.
prefix cache + ascend scheduler + deepseek v2 lite is broken. Co-authored-by: MengqingCao <cmq0113@163.com> Co-authored-by: hfadzxy <starmoon_zhang@163.com> Co-authored-by: leo-pony <nengjunma@outlook.com> Co-authored-by: 22dimensions <waitingwind@foxmail.com> Co-authored-by: shen-shanshan <467638484@qq.com> - vLLM version: v0.11.2 --------- Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com> Signed-off-by: MengqingCao <cmq0113@163.com> Signed-off-by: hfadzxy <starmoon_zhang@163.com> Signed-off-by: leo-pony <nengjunma@outlook.com> Co-authored-by: MengqingCao <cmq0113@163.com> Co-authored-by: hfadzxy <starmoon_zhang@163.com> Co-authored-by: leo-pony <nengjunma@outlook.com>
133 lines
4.4 KiB
YAML
133 lines
4.4 KiB
YAML
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#

# This workflow is related to the Atlas 800 A2 resources.
# We will not limit the concurrency of jobs on A2.
name: 'ascend test / nightly-a2'

on:
  schedule:
    # Run test at 24:00 Beijing time (UTC+8).
    - cron: "0 16 * * *"
  # Allow manual runs from the Actions tab.
  workflow_dispatch:
  # PR runs are further gated by job-level `if:` conditions below
  # (accuracy tests only run on labeled PRs).
  pull_request:
    branches:
      - 'main'
    types: [ labeled, synchronize ]

# Bash shells do not use ~/.profile or ~/.bashrc so these shells need to be explicitly
# declared as "shell: bash -el {0}" on steps that need to be properly activated.
# It's used to activate ascend-toolkit environment variables.
defaults:
  run:
    shell: bash -el {0}

# only cancel in-progress runs of the same workflow
concurrency:
  group: ascend-nightly-${{ github.ref }}-a2
  cancel-in-progress: true
jobs:
  # Multi-node e2e tests (DeepSeek DP configs), delegated to the reusable
  # multi-node workflow. Only runs on schedule or manual dispatch, never on PRs.
  multi-node-tests:
    name: multi-node
    # always() keeps the job eligible regardless of earlier job status;
    # the event check restricts it to nightly/manual runs.
    if: always() && (github.event_name == 'schedule' || github.event_name == 'workflow_dispatch')
    strategy:
      fail-fast: false
      # Multi-node clusters are a scarce resource: run one config at a time.
      max-parallel: 1
      matrix:
        test_config:
          - name: multi-node-deepseek-dp
            config_file_path: DeepSeek-R1-W8A8-A2.yaml
            size: 2
          - name: multi-node-deepseek-dp-torchair
            config_file_path: DeepSeek-R1-W8A8-A2-torchair.yaml
            size: 2
    uses: ./.github/workflows/_e2e_nightly_multi_node.yaml
    with:
      soc_version: a2
      runner: linux-aarch64-a2-0
      image: 'swr.cn-southwest-2.myhuaweicloud.com/base_image/ascend-ci/vllm-ascend:nightly-a2'
      replicas: 1
      size: ${{ matrix.test_config.size }}
      config_file_path: ${{ matrix.test_config.config_file_path }}
    secrets:
      KUBECONFIG_B64: ${{ secrets.KUBECONFIG_A2_B64 }}
single-node-tests:
|
|
name: single-node
|
|
if: always() && (github.event_name == 'schedule' || github.event_name == 'workflow_dispatch')
|
|
needs: multi-node-tests
|
|
strategy:
|
|
fail-fast: false
|
|
matrix:
|
|
test_config:
|
|
- name: qwen3-32b
|
|
os: linux-aarch64-a2-4
|
|
tests: tests/e2e/nightly/models/test_qwen3_32b.py
|
|
- name: qwen3-32b-in8-a2
|
|
os: linux-aarch64-a2-4
|
|
tests: tests/e2e/nightly/models/test_qwen3_32b_int8.py
|
|
- name: test_custom_op
|
|
os: linux-aarch64-a2-1
|
|
tests: tests/e2e/nightly/ops
|
|
uses: ./.github/workflows/_e2e_nightly_single_node.yaml
|
|
with:
|
|
vllm: v0.11.2
|
|
runner: ${{ matrix.test_config.os }}
|
|
tests: ${{ matrix.test_config.tests }}
|
|
image: 'swr.cn-southwest-2.myhuaweicloud.com/base_image/ascend-ci/vllm-ascend:nightly-a2'
|
|
|
|
single-node-accuracy-tests:
|
|
if: >-
|
|
${{
|
|
github.event_name == 'schedule' ||
|
|
github.event_name == 'workflow_dispatch' ||
|
|
(
|
|
contains(github.event.pull_request.labels.*.name, 'accuracy-test') &&
|
|
contains(github.event.pull_request.labels.*.name, 'ready-for-test')
|
|
)
|
|
}}
|
|
strategy:
|
|
fail-fast: false
|
|
matrix:
|
|
test_config:
|
|
- os: linux-aarch64-a2-1
|
|
model_list:
|
|
- Qwen3-8B
|
|
- Qwen2.5-VL-7B-Instruct
|
|
- Qwen2-Audio-7B-Instruct
|
|
- Qwen3-8B-W8A8
|
|
- Qwen3-VL-8B-Instruct
|
|
- Qwen2.5-Omni-7B
|
|
- Meta-Llama-3.1-8B-Instruct
|
|
- os: linux-aarch64-a2-2
|
|
model_list:
|
|
- Qwen3-30B-A3B
|
|
- Qwen3-VL-30B-A3B-Instruct
|
|
- DeepSeek-V2-Lite
|
|
- Qwen3-30B-A3B-W8A8
|
|
- os: linux-aarch64-a2-4
|
|
model_list:
|
|
- Qwen3-Next-80B-A3B-Instruct
|
|
uses: ./.github/workflows/_e2e_nightly_single_node_models.yaml
|
|
with:
|
|
vllm: v0.11.2
|
|
runner: ${{ matrix.test_config.os }}
|
|
model_list: ${{ toJson(matrix.test_config.model_list) }}
|
|
image: swr.cn-southwest-2.myhuaweicloud.com/base_image/ascend-ci/cann:8.2.rc1-910b-ubuntu22.04-py3.11
|
|
upload: false
|