[CI] lint and ut use self_hosted runner (#5652)

### What this PR does / why we need it?
Run the lint and unit test (ut) CI jobs on a self-hosted runner instead of the GitHub-hosted `ubuntu-22.04-arm` runner.

- vLLM version: v0.13.0
- vLLM main: 2f4e6548ef
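
The diff below boils down to retargeting the job: a self-hosted runner is selected by its label, and the CANN container image is pulled through a mirror reachable from that runner. A minimal sketch of the new job header (runner label and image taken from the diff; everything else omitted):

```yaml
jobs:
  ut:
    name: unit test
    # Self-hosted runners are addressed by label, not by a GitHub-hosted image name.
    runs-on: linux-amd64-cpu-16-hk
    container:
      # CANN image pulled via the quay.nju.edu.cn mirror instead of quay.io.
      image: quay.nju.edu.cn/ascend/cann:8.2.rc2-910b-ubuntu22.04-py3.11
```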

---------

Signed-off-by: wangli <wangli858794774@gmail.com>
Author: Li Wang
Committed: 2026-01-09 14:26:14 +08:00 (by GitHub)
Parent: 36d74aba58
Commit: 64904ab5b6
4 changed files with 40 additions and 25 deletions


@@ -80,27 +80,25 @@ jobs:
name: unit test
# only trigger unit test after lint passed and the change is e2e and ut related.
if: ${{ needs.lint.result == 'success' && (needs.changes.outputs.e2e_tracker == 'true' || needs.changes.outputs.ut_tracker == 'true') }}
-runs-on: ubuntu-22.04-arm
+runs-on: linux-amd64-cpu-16-hk
container:
# fixme: vllm-ascend install failed with 8.3.rc2 on github action
-image: quay.io/ascend/cann:8.2.rc2-910b-ubuntu22.04-py3.11
+image: quay.nju.edu.cn/ascend/cann:8.2.rc2-910b-ubuntu22.04-py3.11
env:
VLLM_LOGGING_LEVEL: ERROR
VLLM_USE_MODELSCOPE: True
SOC_VERSION: ascend910b1
MAX_JOBS: 4
strategy:
matrix:
vllm_version: [2f4e6548efec402b913ffddc8726230d9311948d, v0.13.0]
steps:
-- name: Free up disk space
-uses: jlumbroso/free-disk-space@54081f138730dfa15788a46383842cd2f914a1be # v1.3.1
-with:
-tool-cache: true
-docker-images: false
- name: Install packages
run: |
+sed -Ei 's@(ports|archive).ubuntu.com@cache-service.nginx-pypi-cache.svc.cluster.local:8081@g' /etc/apt/sources.list
+pip config set global.index-url http://cache-service.nginx-pypi-cache.svc.cluster.local/pypi/simple
+pip config set global.trusted-host cache-service.nginx-pypi-cache.svc.cluster.local
apt-get update -y
apt-get install -y python3-pip git vim wget net-tools gcc g++ cmake libnuma-dev curl gnupg2
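
The added `sed`/`pip config` lines redirect apt and pip to a cache service inside the runner's cluster, which is also why the disk-cleanup step is dropped. A quick, hypothetical sanity check for the redirection on such a runner:

```bash
# apt should now resolve against the in-cluster cache (hostname from the diff above).
grep -n 'cache-service.nginx-pypi-cache.svc.cluster.local' /etc/apt/sources.list

# pip should report the mirror as global.index-url and global.trusted-host.
pip config list
```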
@@ -114,7 +112,7 @@ jobs:
- name: Install vllm-project/vllm from source
working-directory: ./vllm-empty
run: |
-VLLM_TARGET_DEVICE=empty python3 -m pip install .
+VLLM_TARGET_DEVICE=empty python3 -m pip install . --extra-index https://download.pytorch.org/whl/cpu/
python3 -m pip uninstall -y triton
- name: Checkout vllm-project/vllm-ascend repo
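
With no Ascend device on the runner, vLLM is installed with an empty target device, and the PyTorch CPU wheel index keeps `torch` resolvable on x86_64. A sketch of the same step run locally (using pip's canonical `--extra-index-url` spelling):

```bash
cd vllm-empty
# Install vLLM without building device kernels; fetch torch from the CPU wheel index.
VLLM_TARGET_DEVICE=empty python3 -m pip install . \
    --extra-index-url https://download.pytorch.org/whl/cpu/
# triton is unused in this CPU-only setup, so it is removed.
python3 -m pip uninstall -y triton
```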
@@ -123,9 +121,9 @@ jobs:
- name: Install vllm-project/vllm-ascend
run: |
export PIP_EXTRA_INDEX_URL=https://mirrors.huaweicloud.com/ascend/repos/pypi
-export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/arm64-linux/devlib
-python3 -m pip install -r requirements-dev.txt
-python3 -m pip install -v .
+export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/x86_64-linux/devlib
+python3 -m pip install -v . --extra-index https://download.pytorch.org/whl/cpu/
+python3 -m pip install -r requirements-dev.txt --extra-index https://download.pytorch.org/whl/cpu/
- name: Install Ascend toolkit & triton_ascend
shell: bash -l {0}
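
The devlib path tracks the host architecture: the amd64 runner uses the CANN toolkit's `x86_64-linux` tree where the previous ARM runner used `arm64-linux`. A small sketch for verifying the directory before exporting it (toolkit location from the diff; directory layout assumed from the image):

```bash
TOOLKIT=/usr/local/Ascend/ascend-toolkit/latest
# Only extend the library path if the architecture-specific devlib directory exists.
[ -d "$TOOLKIT/x86_64-linux/devlib" ] && \
    export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$TOOLKIT/x86_64-linux/devlib
```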
@@ -141,7 +139,7 @@ jobs:
VLLM_WORKER_MULTIPROC_METHOD: spawn
TORCH_DEVICE_BACKEND_AUTOLOAD: 0
run: |
-export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/arm64-linux/devlib
+export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/x86_64-linux/devlib
pytest -sv --cov --cov-report=xml:unittests-coverage.xml tests/ut \
--ignore tests/ut/model_loader/netloader/test_netloader_elastic.py \
--ignore tests/ut/kv_connector/test_remote_prefill_lifecycle.py \
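
For reference, the coverage-enabled test run can be reproduced locally in simplified form (assuming `pytest-cov` is installed; the `--ignore` list is truncated in the hunk above and is omitted here):

```bash
# Verbose unit-test run with an XML coverage report, as in the workflow.
pytest -sv --cov --cov-report=xml:unittests-coverage.xml tests/ut
```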