#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#

name: 'test'

on:
  schedule:
    - cron: '0 23 * * *'
  pull_request:
    branches:
      - 'main'
      - '*-dev'
    paths:
      - '*.txt'
      - '**/*.py'
      - '.github/workflows/vllm_ascend_test.yaml'
      - '!docs/**'
      - 'pytest.ini'
      - '!benchmarks/**'
      - 'tools/mypy.sh'
      - 'mypy.ini'
      - '.github/workflows/*.ya?ml'
      - '.github/workflows/actionlint.*'
      - '.github/workflows/matchers/actionlint.json'

# Bash shells do not use ~/.profile or ~/.bashrc so these shells need to be explicitly
# declared as "shell: bash -el {0}" on steps that need to be properly activated.
# It's used to activate ascend-toolkit environment variables.
defaults:
  run:
    shell: bash -el {0}

# only cancel in-progress runs of the same workflow
# and ignore the lint / 1 card / 4 cards test type
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  lint:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["3.10"]
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
        with:
          python-version: ${{ matrix.python-version }}

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements-lint.txt

      - name: Run codespell check
        run: |
          CODESPELL_EXCLUDES=('--skip' 'tests/prompts/**,./benchmarks/sonnet.txt,*tests/lora/data/**,build/**,./vllm_ascend.egg-info/**')
          CODESPELL_IGNORE_WORDS=('-L' 'CANN,cann,NNAL,nnal,ASCEND,ascend,EnQue,CopyIn')
          codespell --toml pyproject.toml "${CODESPELL_EXCLUDES[@]}" "${CODESPELL_IGNORE_WORDS[@]}"

      - name: Analysing the code with ruff
        run: |
          echo "::add-matcher::.github/workflows/matchers/ruff.json"
          ruff check --output-format github .

      - name: Run isort
        run: |
          isort . --check-only

      - name: Running yapf
        run: |
          python -m pip install --upgrade pip
          pip install toml
          pip install yapf==0.32.0
          yapf --diff --recursive .

      - name: Install dependencies
        run: |
          pip install -r requirements-dev.txt --extra-index-url https://download.pytorch.org/whl/cpu

      - name: Checkout vllm-project/vllm repo
        uses: actions/checkout@v4
        with:
          repository: vllm-project/vllm
          path: vllm-empty

      - name: Actionlint Check
        env:
          SHELLCHECK_OPTS: --exclude=SC2046,SC2006,SC2086
        run: |
          echo "::add-matcher::.github/workflows/matchers/actionlint.json"
          tools/actionlint.sh -color

      - name: Install vllm-project/vllm from source
        working-directory: vllm-empty
        run: |
          pip install -r requirements/build.txt --extra-index-url https://download.pytorch.org/whl/cpu
          VLLM_TARGET_DEVICE=empty pip install .
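      # NOTE: the step above installs vLLM with VLLM_TARGET_DEVICE=empty, which (per the
      # vLLM build options) skips compiling any device-specific kernels, so the Mypy step
      # below can type-check against vLLM's Python sources on a CPU-only Ubuntu runner.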
      - name: Mypy Check
        run: |
          echo "::add-matcher::.github/workflows/matchers/mypy.json"
          tools/mypy.sh 1 ${{ matrix.python-version }}

  ut:
    needs: [lint]
    name: unit test
    if: ${{ needs.lint.result == 'success' }}
    runs-on: ubuntu-latest
    container:
      image: m.daocloud.io/quay.io/ascend/cann:8.1.rc1-910b-ubuntu22.04-py3.10
      env:
        VLLM_LOGGING_LEVEL: ERROR
        VLLM_USE_MODELSCOPE: True
    strategy:
      matrix:
        vllm_version: [main, v0.9.1]
    steps:
      - name: Install packages
        run: |
          apt-get update -y
          apt-get install -y python3-pip git vim wget net-tools gcc g++ cmake libnuma-dev

      - name: Checkout vllm-project/vllm repo
        uses: actions/checkout@v4
        with:
          repository: vllm-project/vllm
          ref: ${{ matrix.vllm_version }}
          path: ./vllm-empty

      - name: Install vllm-project/vllm from source
        working-directory: ./vllm-empty
        run: |
          VLLM_TARGET_DEVICE=empty python3 -m pip install . --extra-index https://download.pytorch.org/whl/cpu/
          python3 -m pip uninstall -y triton

      - name: Checkout vllm-project/vllm-ascend repo
        uses: actions/checkout@v4

      - name: Install vllm-project/vllm-ascend
        run: |
          export PIP_EXTRA_INDEX_URL=https://mirrors.huaweicloud.com/ascend/repos/pypi
          export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/x86_64-linux/devlib
          python3 -m pip install -r requirements-dev.txt --extra-index https://download.pytorch.org/whl/cpu/
          python3 -m pip install -v . --extra-index https://download.pytorch.org/whl/cpu/

      - name: Run unit test for V1 Engine
        env:
          VLLM_USE_V1: 1
          VLLM_WORKER_MULTIPROC_METHOD: spawn
          TORCH_DEVICE_BACKEND_AUTOLOAD: 0
        run: |
          export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/x86_64-linux/devlib
          pytest -sv tests/ut

  e2e:
    needs: [lint]
    if: ${{ needs.lint.result == 'success' }}
    strategy:
      max-parallel: 2
      matrix:
        os: [linux-arm64-npu-1]
        vllm_version: [main, v0.9.1]
    name: singlecard e2e test
    runs-on: ${{ matrix.os }}
    container:
      # TODO(yikun): Remove m.daocloud.io prefix when infra proxy ready
      image: m.daocloud.io/quay.io/ascend/cann:8.1.rc1-910b-ubuntu22.04-py3.10
      env:
        VLLM_LOGGING_LEVEL: ERROR
    steps:
      - name: Check npu and CANN info
        run: |
          npu-smi info
          cat /usr/local/Ascend/ascend-toolkit/latest/"$(uname -i)"-linux/ascend_toolkit_install.info

      - name: Config mirrors
        run: |
          sed -i 's|ports.ubuntu.com|mirrors.tuna.tsinghua.edu.cn|g' /etc/apt/sources.list
          pip config set global.index-url https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
          apt-get update -y
          apt install git -y
          git config --global url."https://gh-proxy.test.osinfra.cn/https://github.com/".insteadOf https://github.com/

      - name: Checkout vllm-project/vllm-ascend repo
        uses: actions/checkout@v4

      - name: Install system dependencies
        run: |
          apt-get -y install `cat packages.txt`
          apt-get -y install gcc g++ cmake libnuma-dev

      - name: Checkout vllm-project/vllm repo
        uses: actions/checkout@v4
        with:
          repository: vllm-project/vllm
          ref: ${{ matrix.vllm_version }}
          path: ./vllm-empty

      - name: Install vllm-project/vllm from source
        working-directory: ./vllm-empty
        run: |
          VLLM_TARGET_DEVICE=empty pip install -e .

      - name: Install vllm-project/vllm-ascend
        env:
          PIP_EXTRA_INDEX_URL: https://mirrors.huaweicloud.com/ascend/repos/pypi
        run: |
          pip install -r requirements-dev.txt
          pip install -v -e .
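      # NOTE: in the step below, the LoRA test gets its own pytest invocation so that
      # VLLM_USE_MODELSCOPE/HF_ENDPOINT can be overridden for just that test; the rest of
      # the singlecard suite then runs in one pass with the already-covered files ignored.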
      - name: Run e2e test for V1 Engine
        env:
          VLLM_USE_V1: 1
          VLLM_WORKER_MULTIPROC_METHOD: spawn
          VLLM_USE_MODELSCOPE: True
        run: |
          pytest -sv tests/e2e/singlecard/test_offline_inference.py
          # TODO: switch hf to modelscope
          VLLM_USE_MODELSCOPE=False HF_ENDPOINT=https://hf-mirror.com \
            pytest -sv tests/e2e/singlecard/test_ilama_lora.py
          # TODO(sss): guided decoding doesn't work, fix it later
          # pytest -sv tests/e2e/singlecard/test_guided_decoding.py
          pytest -sv tests/e2e/singlecard/test_camem.py
          pytest -sv tests/e2e/singlecard/ \
            --ignore=tests/e2e/singlecard/test_offline_inference.py \
            --ignore=tests/e2e/singlecard/test_ilama_lora.py \
            --ignore=tests/e2e/singlecard/test_guided_decoding.py \
            --ignore=tests/e2e/singlecard/test_camem.py

      - name: Run e2e test on V0 engine
        if: ${{ github.event_name == 'schedule' }}
        env:
          VLLM_USE_V1: 0
          VLLM_USE_MODELSCOPE: True
        run: |
          pytest -sv tests/e2e/singlecard/test_offline_inference.py
          # TODO: switch hf to modelscope
          VLLM_USE_MODELSCOPE=False HF_ENDPOINT=https://hf-mirror.com \
            pytest -sv tests/e2e/singlecard/test_ilama_lora.py
          # guided decoding doesn't work, fix it later
          # pytest -sv tests/e2e/singlecard/test_guided_decoding.py
          pytest -sv tests/e2e/singlecard/test_camem.py
          pytest -sv tests/e2e/singlecard/test_prompt_embedding.py
          pytest -sv tests/e2e/singlecard/ \
            --ignore=tests/e2e/singlecard/test_offline_inference.py \
            --ignore=tests/e2e/singlecard/test_ilama_lora.py \
            --ignore=tests/e2e/singlecard/test_guided_decoding.py \
            --ignore=tests/e2e/singlecard/test_camem.py \
            --ignore=tests/e2e/singlecard/test_prompt_embedding.py \
            --ignore=tests/e2e/singlecard/core/test_ascend_scheduler.py \
            --ignore=tests/e2e/singlecard/core/test_ascend_scheduler_e2e.py

  e2e-4-cards:
    needs: [e2e]
    if: ${{ needs.e2e.result == 'success' }}
    strategy:
      max-parallel: 1
      matrix:
        os: [linux-arm64-npu-4]
        vllm_version: [main, v0.9.1]
    name: multicard e2e test
    runs-on: ${{ matrix.os }}
    container:
      # TODO(yikun): Remove m.daocloud.io prefix when infra proxy ready
      image: m.daocloud.io/quay.io/ascend/cann:8.1.rc1-910b-ubuntu22.04-py3.10
      env:
        VLLM_LOGGING_LEVEL: ERROR
        VLLM_USE_MODELSCOPE: True
    steps:
      - name: Check npu and CANN info
        run: |
          npu-smi info
          cat /usr/local/Ascend/ascend-toolkit/latest/"$(uname -i)"-linux/ascend_toolkit_install.info

      - name: Config mirrors
        run: |
          sed -i 's|ports.ubuntu.com|mirrors.tuna.tsinghua.edu.cn|g' /etc/apt/sources.list
          pip config set global.index-url https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
          apt-get update -y
          apt install git -y
          git config --global url."https://gh-proxy.test.osinfra.cn/https://github.com/".insteadOf https://github.com/

      - name: Checkout vllm-project/vllm-ascend repo
        uses: actions/checkout@v4

      - name: Install system dependencies
        run: |
          apt-get -y install `cat packages.txt`
          apt-get -y install gcc g++ cmake libnuma-dev

      - name: Checkout vllm-project/vllm repo
        uses: actions/checkout@v4
        with:
          repository: vllm-project/vllm
          ref: ${{ matrix.vllm_version }}
          path: ./vllm-empty

      - name: Install vllm-project/vllm from source
        working-directory: ./vllm-empty
        run: |
          VLLM_TARGET_DEVICE=empty pip install -e .

      - name: Install vllm-project/vllm-ascend
        env:
          PIP_EXTRA_INDEX_URL: https://mirrors.huaweicloud.com/ascend/repos/pypi
        run: |
          pip install -r requirements-dev.txt
          pip install -v -e .
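      # NOTE: the distributed offline-inference cases below are launched as separate
      # pytest processes (and max-parallel is 1 for this job) to keep NPU memory usage
      # bounded; see the oom note inside the run block.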
      - name: Run vllm-project/vllm-ascend test for V1 Engine
        env:
          VLLM_USE_V1: 1
          VLLM_WORKER_MULTIPROC_METHOD: spawn
          VLLM_USE_MODELSCOPE: True
        run: |
          # TODO: switch hf to modelscope
          VLLM_USE_MODELSCOPE=False HF_ENDPOINT=https://hf-mirror.com \
            pytest -sv tests/e2e/multicard/test_ilama_lora_tp2.py
          # Fixme: run VLLM_USE_MODELSCOPE=True pytest -sv tests/e2e/multicard/test_offline_inference_distributed.py will raise error.
          # To avoid oom, we need to run the test in a single process.
          pytest -sv tests/e2e/multicard/test_offline_inference_distributed.py::test_models_distributed_QwQ
          pytest -sv tests/e2e/multicard/test_offline_inference_distributed.py::test_models_distributed_DeepSeek
          pytest -sv tests/e2e/multicard/test_offline_inference_distributed.py::test_models_distributed_topk
          pytest -sv tests/e2e/multicard/test_offline_inference_distributed.py::test_models_distributed_DeepSeek_W8A8
          pytest -sv tests/e2e/multicard/ --ignore=tests/e2e/multicard/test_ilama_lora_tp2.py --ignore=tests/e2e/multicard/test_offline_inference_distributed.py

      - name: Run vllm-project/vllm-ascend test on V0 engine
        if: ${{ github.event_name == 'schedule' }}
        env:
          VLLM_USE_V1: 0
          VLLM_USE_MODELSCOPE: True
        run: |
          # TODO: switch hf to modelscope
          VLLM_USE_MODELSCOPE=False HF_ENDPOINT=https://hf-mirror.com \
            pytest -sv tests/e2e/multicard/test_ilama_lora_tp2.py
          # Fixme: run VLLM_USE_MODELSCOPE=True pytest -sv tests/e2e/multicard/test_offline_inference_distributed.py will raise error.
          # To avoid oom, we need to run the test in a single process.
          pytest -sv tests/e2e/multicard/test_offline_inference_distributed.py::test_models_distributed_QwQ
          pytest -sv tests/e2e/multicard/test_offline_inference_distributed.py::test_models_distributed_DeepSeek
          pytest -sv tests/e2e/multicard/test_offline_inference_distributed.py::test_models_distributed_topk
          pytest -sv tests/e2e/multicard/test_offline_inference_distributed.py::test_models_distributed_DeepSeek_W8A8
          pytest -sv tests/e2e/multicard/ --ignore=tests/e2e/multicard/test_ilama_lora_tp2.py --ignore=tests/e2e/multicard/test_offline_inference_distributed.py