Files
xc-llm-ascend/.github/workflows/_unit_test.yaml
SILONG ZENG 90aa048e60 [CI] Skip test_mooncake_layerwise_connector.py in ut (#7147)
### What this PR does / why we need it?
The `test_mooncake_layerwise_connector.py` file in the `ut` test suite will be
skipped for now and fixed later.

- vLLM version: v0.16.0
- vLLM main:
4034c3d32e

Signed-off-by: MrZ20 <2609716663@qq.com>
2026-03-11 11:46:29 +08:00

89 lines
3.3 KiB
YAML

---
# Reusable unit-test workflow for vllm-ascend.
# Invoked via `workflow_call` from the repo's top-level CI workflows.
name: 'unit test'

on:
  workflow_call:
    inputs:
      # Git ref of vllm-project/vllm to build against (branch, tag, or SHA).
      vllm:
        required: true
        type: string
      # Label of the self-hosted runner to execute on.
      runner:
        required: true
        type: string
      # Container image the job runs inside.
      image:
        required: true
        type: string
      # Trigger kind of the caller (e.g. 'schedule'); gates the Codecov upload.
      type:
        required: true
        type: string

jobs:
  unit-test:
    name: unit test
    runs-on: ${{ inputs.runner }}
    container:
      image: ${{ inputs.image }}
      env:
        VLLM_LOGGING_LEVEL: ERROR
        # Quoted: env values are strings; an unquoted `True` is a YAML boolean
        # and would otherwise be normalized by the YAML parser.
        VLLM_USE_MODELSCOPE: "True"
        SOC_VERSION: ascend910b1
        MAX_JOBS: "4"
        COMPILE_CUSTOM_KERNELS: "0"
    steps:
      - name: Install packages
        # Point apt and pip at the in-cluster cache mirror, then install
        # the toolchain needed to build vllm-ascend from source.
        run: |
          sed -Ei 's@(ports|archive).ubuntu.com@cache-service.nginx-pypi-cache.svc.cluster.local:8081@g' /etc/apt/sources.list
          pip config set global.index-url http://cache-service.nginx-pypi-cache.svc.cluster.local/pypi/simple
          pip config set global.trusted-host cache-service.nginx-pypi-cache.svc.cluster.local
          apt-get update -y
          apt-get install -y python3-pip git vim wget net-tools gcc g++ cmake libnuma-dev curl gnupg2
      - name: Checkout vllm-project/vllm repo
        uses: actions/checkout@v6
        with:
          repository: vllm-project/vllm
          ref: ${{ inputs.vllm }}
          path: ./vllm-empty
      - name: Install vllm-project/vllm from source
        working-directory: ./vllm-empty
        # VLLM_TARGET_DEVICE=empty installs vLLM without compiling device
        # kernels; triton is removed because it is unused on Ascend.
        # Use the full `--extra-index-url` flag: `--extra-index` only works
        # through optparse prefix abbreviation and is not a documented option.
        run: |
          VLLM_TARGET_DEVICE=empty python3 -m pip install . --extra-index-url https://download.pytorch.org/whl/cpu/
          python3 -m pip uninstall -y triton
      - name: Checkout vllm-project/vllm-ascend repo
        uses: actions/checkout@v6
      - name: Install vllm-project/vllm-ascend
        run: |
          export PIP_EXTRA_INDEX_URL=https://mirrors.huaweicloud.com/ascend/repos/pypi
          export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/x86_64-linux/devlib
          python3 -m pip install -v . --extra-index-url https://download.pytorch.org/whl/cpu/
          python3 -m pip install -r requirements-dev.txt --extra-index-url https://download.pytorch.org/whl/cpu/
      - name: Run unit test
        env:
          VLLM_WORKER_MULTIPROC_METHOD: spawn
          TORCH_DEVICE_BACKEND_AUTOLOAD: "0"
        # The --ignore'd files are known-broken tests, skipped until fixed
        # (see the PR history for each exclusion).
        run: |
          export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/x86_64-linux/devlib
          pytest -sv --cov --cov-report=xml:unittests-coverage.xml tests/ut \
            --ignore tests/ut/model_loader/netloader/test_netloader_elastic.py \
            --ignore tests/ut/kv_connector/test_remote_prefill_lifecycle.py \
            --ignore tests/ut/kv_connector/test_remote_decode_lifecycle.py \
            --ignore tests/ut/core/test_scheduler_dynamic_batch.py \
            --ignore tests/ut/kv_connector/test_mooncake_connector.py \
            --ignore tests/ut/worker/test_worker_v1.py \
            --ignore tests/ut/spec_decode/test_mtp_proposer.py \
            --ignore tests/ut/kv_connector/test_mooncake_layerwise_connector.py
      - name: Upload coverage to Codecov
        # only upload coverage when commits merged
        if: ${{ inputs.type == 'schedule' }}
        uses: codecov/codecov-action@v5
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
        with:
          flags: unittests
          name: vllm-ascend
          verbose: true