[CI] Use CI pool (#428)

Use the CI pool instead of self-hosted runners for e2e tests to speed up CI.

Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
This commit is contained in:
wangxiyuan
2025-03-29 12:42:59 +08:00
committed by GitHub
parent ca8b1c3e47
commit b6499ed97d

View File

@@ -25,7 +25,7 @@ on:
paths:
- '*.txt'
- '**/*.py'
- '.github/workflows/vllm_ascend_test.yaml'
- '.github/workflows/vllm_ascend_test_main.yaml'
- '!docs/**'
- 'pytest.ini'
@@ -37,69 +37,30 @@ defaults:
shell: bash -el {0}
jobs:
dispatch:
name: vLLM Ascend test (dispatch)
runs-on: ascend-ci-arm64
outputs:
number: ${{ steps.dispatch-device.outputs.number }}
steps:
- name: vLLM Ascend test (dispatch)
id: dispatch-device
run: |
# Try to acquire lock to dispatch devices
lockfile /tmp/dispatch.lock
# Print npu info
npu-list /dev/null 2>&1
# Select first available device (Skip reserved davinci1 and davinci0)
NUMBER=$(npu-list /dev/null 2>&1 | grep None | grep -v davinci1 | grep -v davinci0 | head -1 | cut -b 15)
echo "Dispatch to /dev/davinci$NUMBER"
echo "number=$NUMBER" >> $GITHUB_OUTPUT
test:
needs: [dispatch]
name: vLLM Ascend test (self-host)
runs-on: ascend-ci-arm64 # actionlint-ignore: runner-label
runs-on: linux-arm64-npu-1 # actionlint-ignore: runner-label
container:
image: quay.io/ascend/cann:8.0.0-910b-ubuntu22.04-py3.10
volumes:
- /usr/local/dcmi:/usr/local/dcmi
- /usr/local/bin/npu-smi:/usr/local/bin/npu-smi
- /usr/local/Ascend/driver/lib64/:/usr/local/Ascend/driver/lib64/
# Use self-host cache speed up pip and model download
- /home/action/cache:/github/home/.cache/
# for dispatch lock
- /tmp/:/tmp/
# for vllm and vllm-ascend
- /data1/code:/code
options: >-
--device /dev/davinci${{ needs.dispatch.outputs.number }}
--device /dev/davinci_manager
--device /dev/devmm_svm
--device /dev/hisi_hdc
env:
HF_ENDPOINT: https://hf-mirror.com
steps:
- name: Check npu and CANN info
run: |
npu-smi info
cat /usr/local/Ascend/ascend-toolkit/latest/"$(uname -i)"-linux/ascend_toolkit_install.info
# unlock
rm -rf /tmp/dispatch.lock
- name: Config mirrors
run: |
sed -i 's|ports.ubuntu.com|mirrors.tuna.tsinghua.edu.cn|g' /etc/apt/sources.list
pip config set global.index-url https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
apt-get update -y
apt install git -y
git config --global url."https://gh-proxy.test.osinfra.cn/https://github.com/".insteadOf https://github.com/
- name: Checkout vllm-project/vllm-ascend repo
uses: actions/checkout@v4
- name: Install system dependencies
run: |
apt-get update -y
apt-get -y install `cat packages.txt`
- name: Install dependencies
@@ -123,32 +84,38 @@ jobs:
- name: Install pta
run: |
cd /code/pta/
pip install ./torch_npu-2.5.1.dev20250320-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl
if [ ! -d /root/.cache/pta ]; then
mkdir -p /root/.cache/pta
fi
if [ ! -f /root/.cache/pta/torch_npu-2.5.1.dev20250320-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl ]; then
cd /root/.cache/pta
rm -rf pytorch_v2.5.1_py310*
wget https://pytorch-package.obs.cn-north-4.myhuaweicloud.com/pta/Daily/v2.5.1/20250320.3/pytorch_v2.5.1_py310.tar.gz
tar -zxvf pytorch_v2.5.1_py310.tar.gz
fi
pip install /root/.cache/pta/torch_npu-2.5.1.dev20250320-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl
- name: Run vllm-project/vllm-ascend test for V0 Engine
env:
VLLM_USE_V1: 0
HF_ENDPOINT: https://hf-mirror.com
run: |
VLLM_USE_V1=0 pytest -sv tests
- name: Run vllm-project/vllm-ascend test for V1 Engine
env:
VLLM_USE_V1: 1
VLLM_WORKER_MULTIPROC_METHOD: spawn
HF_ENDPOINT: https://hf-mirror.com
run: |
VLLM_USE_V1=1 VLLM_WORKER_MULTIPROC_METHOD=spawn pytest -sv tests
pytest -sv tests
- name: Run vllm-project/vllm test for V0 Engine
env:
VLLM_USE_V1: 0
PYTORCH_NPU_ALLOC_CONF: max_split_size_mb:256
HF_ENDPOINT: https://hf-mirror.com
run: |
pytest -sv
post_cleanup:
name: vLLM Ascend test (post-cleanup)
needs: [test]
runs-on: ascend-ci-arm64 # actionlint-ignore: runner-label
if: always()
steps:
- name: Remove dispatch lock if exists
run: |
if [ -f "/tmp/dispatch.lock" ]; then
rm -f "/tmp/dispatch.lock"
fi