1. Enable the lint check for all changes.
2. Only run UT and e2e when the change touches code (see the gating excerpt below).
3. Only run UT, and skip e2e, when the change touches unit tests only.
4. Disable the wheel build for the push case.
5. Run unit tests when a PR is merged.
6. Remove the unused pytest.ini.
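
The gating is driven by the `changes` job, which publishes `e2e_tracker` and
`ut_tracker` outputs via dorny/paths-filter. The key conditions, excerpted
from the workflow below:

    ut:
      if: ${{ github.event_name == 'push' || (needs.lint.result == 'success' && (needs.changes.outputs.e2e_tracker == 'true' || needs.changes.outputs.ut_tracker == 'true')) }}
    e2e:
      if: ${{ needs.lint.result == 'success' && needs.changes.outputs.e2e_tracker == 'true' }}
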
- vLLM version: v0.9.2
- vLLM main: fdfd409f8f
Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#

name: 'test'

on:
  push:
    branches:
      - 'main'
  pull_request:
    branches:
      - 'main'
      - '*-dev'

# Bash shells do not source ~/.profile or ~/.bashrc by default, so steps that
# need the ascend-toolkit environment variables must explicitly declare
# "shell: bash -el {0}" to activate them.
defaults:
  run:
    shell: bash -el {0}

# Only cancel in-progress runs of the same workflow and ref;
# the lint / 1-card / 4-card test type is not part of the group key.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  lint:
    # Only trigger lint on pull request
    if: ${{ github.event_name == 'pull_request' }}
    uses: ./.github/workflows/pre-commit.yml

  changes:
    # Only trigger changes on pull request
    if: ${{ github.event_name == 'pull_request' }}
    runs-on: ubuntu-latest
    permissions:
      pull-requests: read
    outputs:
      e2e_tracker: ${{ steps.filter.outputs.e2e_tracker }}
      ut_tracker: ${{ steps.filter.outputs.ut_tracker }}
    steps:
      - uses: dorny/paths-filter@v3
        id: filter
        with:
          filters: |
            e2e_tracker:
              - 'vllm_ascend/**'
              - 'csrc/**'
              - 'cmake/**'
              - 'tests/e2e/**'
              - 'tests/conftest.py'
              - 'tests/model_utils.py'
              - 'tests/utils.py'
            ut_tracker:
              - 'tests/ut/**'
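
  # The ut and e2e jobs below consume these outputs: a source or e2e change
  # runs both suites, while a ut-only change runs only the unit tests.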
  ut:
    needs: [lint, changes]
    name: unit test
    # Only trigger the unit tests after lint has passed and the change is
    # e2e- or ut-related, or when the PR is merged (a push to main).
    if: ${{ github.event_name == 'push' || (needs.lint.result == 'success' && (needs.changes.outputs.e2e_tracker == 'true' || needs.changes.outputs.ut_tracker == 'true')) }}
    runs-on: ubuntu-latest
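    # The unit tests run on a CPU-only GitHub-hosted runner inside the CANN
    # image; no NPU hardware is needed for this job.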
    container:
      image: quay.io/ascend/cann:8.1.rc1-910b-ubuntu22.04-py3.10
      env:
        VLLM_LOGGING_LEVEL: ERROR
        VLLM_USE_MODELSCOPE: True
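    # Test against both the pinned vLLM release and current vLLM main, so
    # upstream breakage surfaces early.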
    strategy:
      matrix:
        vllm_version: [main, v0.9.2]
    steps:
      - name: Install packages
        run: |
          apt-get update -y
          apt-get install -y python3-pip git vim wget net-tools gcc g++ cmake libnuma-dev curl gnupg2

      - name: Checkout vllm-project/vllm repo
        uses: actions/checkout@v4
        with:
          repository: vllm-project/vllm
          ref: ${{ matrix.vllm_version }}
          path: ./vllm-empty
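
      # VLLM_TARGET_DEVICE=empty installs vLLM without compiling any
      # device-specific kernels; the Ascend backend comes from the
      # vllm-ascend plugin installed below. triton is uninstalled afterwards,
      # presumably because it is CUDA-oriented and unused on NPU.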
      - name: Install vllm-project/vllm from source
        working-directory: ./vllm-empty
        run: |
          VLLM_TARGET_DEVICE=empty python3 -m pip install . --extra-index-url https://download.pytorch.org/whl/cpu/
          python3 -m pip uninstall -y triton

      - name: Checkout vllm-project/vllm-ascend repo
        uses: actions/checkout@v4

      - name: Install vllm-project/vllm-ascend
        run: |
          export PIP_EXTRA_INDEX_URL=https://mirrors.huaweicloud.com/ascend/repos/pypi
          export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/x86_64-linux/devlib
          python3 -m pip install -r requirements-dev.txt --extra-index-url https://download.pytorch.org/whl/cpu/
          python3 -m pip install -v . --extra-index-url https://download.pytorch.org/whl/cpu/
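
      # TORCH_DEVICE_BACKEND_AUTOLOAD=0 disables torch's device-backend
      # autoload here (assumption: it keeps torch_npu from initializing on
      # this NPU-less runner).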
      - name: Run unit test for V1 Engine
        env:
          VLLM_USE_V1: 1
          VLLM_WORKER_MULTIPROC_METHOD: spawn
          TORCH_DEVICE_BACKEND_AUTOLOAD: 0
        run: |
          export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/x86_64-linux/devlib
          pytest -sv --cov --cov-report=xml:unittests-coverage.xml tests/ut
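
      # Coverage is uploaded only from the `main` matrix entry, presumably to
      # avoid duplicate reports for the same commit.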
      - name: Upload coverage to Codecov
        if: ${{ matrix.vllm_version == 'main' }}
        uses: codecov/codecov-action@v5
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
        with:
          flags: unittests
          name: vllm-ascend
          verbose: true

  e2e:
    needs: [lint, changes]
    # Only trigger the e2e tests after lint has passed and the change is
    # e2e-related.
    if: ${{ needs.lint.result == 'success' && needs.changes.outputs.e2e_tracker == 'true' }}
    strategy:
      max-parallel: 2
      matrix:
        os: [linux-arm64-npu-1]
        vllm_version: [main, v0.9.2]
    name: singlecard e2e test
    runs-on: ${{ matrix.os }}
    container:
      # TODO(yikun): Remove m.daocloud.io prefix when infra proxy ready
      image: m.daocloud.io/quay.io/ascend/cann:8.1.rc1-910b-ubuntu22.04-py3.10
      env:
        VLLM_LOGGING_LEVEL: ERROR
        VLLM_USE_MODELSCOPE: True
    steps:
      - name: Check npu and CANN info
        run: |
          npu-smi info
          cat /usr/local/Ascend/ascend-toolkit/latest/"$(uname -i)"-linux/ascend_toolkit_install.info

      - name: Config mirrors
        run: |
          sed -i 's|ports.ubuntu.com|mirrors.tuna.tsinghua.edu.cn|g' /etc/apt/sources.list
          pip config set global.index-url https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
          apt-get update -y
          apt-get install -y git
          git config --global url."https://gh-proxy.test.osinfra.cn/https://github.com/".insteadOf https://github.com/

      - name: Checkout vllm-project/vllm-ascend repo
        uses: actions/checkout@v4

      - name: Install system dependencies
        run: |
          apt-get -y install $(cat packages.txt)
          apt-get -y install gcc g++ cmake libnuma-dev

      - name: Checkout vllm-project/vllm repo
        uses: actions/checkout@v4
        with:
          repository: vllm-project/vllm
          ref: ${{ matrix.vllm_version }}
          path: ./vllm-empty

      - name: Install vllm-project/vllm from source
        working-directory: ./vllm-empty
        run: |
          VLLM_TARGET_DEVICE=empty pip install -e .

      - name: Install vllm-project/vllm-ascend
        env:
          PIP_EXTRA_INDEX_URL: https://mirrors.huaweicloud.com/ascend/repos/pypi
        run: |
          pip install -r requirements-dev.txt
          pip install -v -e .

      - name: Run e2e test for V1 Engine
        env:
          VLLM_USE_V1: 1
          VLLM_WORKER_MULTIPROC_METHOD: spawn
          VLLM_USE_MODELSCOPE: True
        run: |
          pytest -sv tests/e2e/singlecard/test_offline_inference.py
          pytest -sv tests/e2e/singlecard/test_ilama_lora.py
          pytest -sv tests/e2e/singlecard/test_guided_decoding.py
          pytest -sv tests/e2e/singlecard/test_camem.py
          pytest -sv tests/e2e/singlecard/test_embedding.py
          pytest -sv tests/e2e/singlecard/ \
            --ignore=tests/e2e/singlecard/test_offline_inference.py \
            --ignore=tests/e2e/singlecard/test_ilama_lora.py \
            --ignore=tests/e2e/singlecard/test_guided_decoding.py \
            --ignore=tests/e2e/singlecard/test_camem.py \
            --ignore=tests/e2e/singlecard/test_embedding.py \
            --ignore=tests/e2e/singlecard/spec_decode_v1/test_v1_mtp_correctness.py \
            --ignore=tests/e2e/singlecard/spec_decode_v1/test_v1_spec_decode.py
          # ------------------------------------ v1 spec decode test ------------------------------------ #
          VLLM_USE_MODELSCOPE=True pytest -sv tests/e2e/singlecard/spec_decode_v1/test_v1_mtp_correctness.py
          # TODO: revert me when test_v1_spec_decode.py::test_ngram_correctness is fixed
          VLLM_USE_MODELSCOPE=True pytest -sv tests/e2e/singlecard/spec_decode_v1/test_v1_spec_decode.py
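
      # Note: this workflow declares no `schedule` trigger (only push and
      # pull_request), so the V0 step below is effectively disabled.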
      - name: Run e2e test on V0 engine
        if: ${{ github.event_name == 'schedule' }}
        env:
          VLLM_USE_V1: 0
          VLLM_USE_MODELSCOPE: True
        run: |
          pytest -sv tests/e2e/singlecard/test_offline_inference.py
          pytest -sv tests/e2e/singlecard/test_ilama_lora.py
          pytest -sv tests/e2e/singlecard/test_guided_decoding.py
          pytest -sv tests/e2e/singlecard/test_camem.py
          pytest -sv tests/e2e/singlecard/test_prompt_embedding.py
          pytest -sv tests/e2e/singlecard/test_embedding.py
          pytest -sv tests/e2e/singlecard/ \
            --ignore=tests/e2e/singlecard/test_offline_inference.py \
            --ignore=tests/e2e/singlecard/test_ilama_lora.py \
            --ignore=tests/e2e/singlecard/test_guided_decoding.py \
            --ignore=tests/e2e/singlecard/test_camem.py \
            --ignore=tests/e2e/singlecard/test_prompt_embedding.py \
            --ignore=tests/e2e/singlecard/test_embedding.py
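
  # Multicard tests are chained behind the singlecard job, so they only run
  # after the singlecard e2e suite has passed.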
  e2e-4-cards:
    needs: [e2e]
    if: ${{ needs.e2e.result == 'success' }}
    strategy:
      max-parallel: 1
      matrix:
        os: [linux-arm64-npu-4]
        vllm_version: [main, v0.9.2]
    name: multicard e2e test
    runs-on: ${{ matrix.os }}
    container:
      # TODO(yikun): Remove m.daocloud.io prefix when infra proxy ready
      image: m.daocloud.io/quay.io/ascend/cann:8.1.rc1-910b-ubuntu22.04-py3.10
      env:
        VLLM_LOGGING_LEVEL: ERROR
        VLLM_USE_MODELSCOPE: True
    steps:
      - name: Check npu and CANN info
        run: |
          npu-smi info
          cat /usr/local/Ascend/ascend-toolkit/latest/"$(uname -i)"-linux/ascend_toolkit_install.info

      - name: Config mirrors
        run: |
          sed -i 's|ports.ubuntu.com|mirrors.tuna.tsinghua.edu.cn|g' /etc/apt/sources.list
          pip config set global.index-url https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
          apt-get update -y
          apt-get install -y git
          git config --global url."https://gh-proxy.test.osinfra.cn/https://github.com/".insteadOf https://github.com/

      - name: Checkout vllm-project/vllm-ascend repo
        uses: actions/checkout@v4

      - name: Install system dependencies
        run: |
          apt-get -y install $(cat packages.txt)
          apt-get -y install gcc g++ cmake libnuma-dev

      - name: Checkout vllm-project/vllm repo
        uses: actions/checkout@v4
        with:
          repository: vllm-project/vllm
          ref: ${{ matrix.vllm_version }}
          path: ./vllm-empty

      - name: Install vllm-project/vllm from source
        working-directory: ./vllm-empty
        run: |
          VLLM_TARGET_DEVICE=empty pip install -e .

      - name: Install vllm-project/vllm-ascend
        env:
          PIP_EXTRA_INDEX_URL: https://mirrors.huaweicloud.com/ascend/repos/pypi
        run: |
          pip install -r requirements-dev.txt
          pip install -v -e .

      - name: Run vllm-project/vllm-ascend test for V1 Engine
        env:
          VLLM_USE_V1: 1
          VLLM_WORKER_MULTIPROC_METHOD: spawn
          VLLM_USE_MODELSCOPE: True
        run: |
          pytest -sv tests/e2e/multicard/test_ilama_lora_tp2.py
          # FIXME: running `VLLM_USE_MODELSCOPE=True pytest -sv tests/e2e/multicard/test_offline_inference_distributed.py`
          # as a whole raises an error, and to avoid OOM each test runs in its own process.
          pytest -sv tests/e2e/multicard/test_offline_inference_distributed.py::test_models_distributed_DeepSeek_multistream_moe
          pytest -sv tests/e2e/multicard/test_offline_inference_distributed.py::test_models_distributed_QwQ
          pytest -sv tests/e2e/multicard/test_offline_inference_distributed.py::test_models_distributed_DeepSeek_W8A8
          pytest -sv tests/e2e/multicard/test_offline_inference_distributed.py::test_models_distributed_DeepSeek_dbo
          pytest -sv tests/e2e/multicard/test_offline_inference_distributed.py::test_models_distributed_DeepSeekV3_dbo
          pytest -sv tests/e2e/multicard/test_data_parallel.py
          pytest -sv tests/e2e/multicard/ --ignore=tests/e2e/multicard/test_ilama_lora_tp2.py \
            --ignore=tests/e2e/multicard/test_offline_inference_distributed.py \
            --ignore=tests/e2e/multicard/test_data_parallel.py
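
      # Note: as with the singlecard job, there is no `schedule` trigger in
      # this workflow, so the V0 step below never runs here.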
      - name: Run vllm-project/vllm-ascend test on V0 engine
        if: ${{ github.event_name == 'schedule' }}
        env:
          VLLM_USE_V1: 0
          VLLM_USE_MODELSCOPE: True
        run: |
          pytest -sv tests/e2e/multicard/test_ilama_lora_tp2.py
          # FIXME: running `VLLM_USE_MODELSCOPE=True pytest -sv tests/e2e/multicard/test_offline_inference_distributed.py`
          # as a whole raises an error, and to avoid OOM each test runs in its own process.
          pytest -sv tests/e2e/multicard/test_offline_inference_distributed.py::test_models_distributed_QwQ
          pytest -sv tests/e2e/multicard/test_offline_inference_distributed.py::test_models_distributed_DeepSeek_W8A8
          pytest -sv tests/e2e/multicard/test_data_parallel.py
          pytest -sv tests/e2e/multicard/ --ignore=tests/e2e/multicard/test_ilama_lora_tp2.py \
            --ignore=tests/e2e/multicard/test_offline_inference_distributed.py \
            --ignore=tests/e2e/multicard/test_data_parallel.py