Files
xc-llm-ascend/.github/workflows/_e2e_test.yaml
Li Wang 99e1ea0fe6 [v0.18.0][Misc] Upgrade torch_npu to pre-release built version (#7918)
### What this PR does / why we need it?
This PR upgrades the `torch_npu` (PTA) version in multiple Dockerfiles
to a pre-release build. It introduces logic to dynamically select the
correct wheel based on the Python version and system architecture.

### Does this PR introduce _any_ user-facing change?
No.

### How was this patch tested?
CI passed with existing tests. The author should verify that the Docker
images build successfully for all supported architectures and Python
versions.

---------

Signed-off-by: wangli <wangli858794774@gmail.com>
2026-04-01 22:41:09 +08:00

732 lines
28 KiB
YAML

# Reusable e2e test workflow for vllm-ascend.
# Called via `workflow_call` with a vllm git ref, a container image,
# a test scope ('light' or 'full'), and flags for 310p coverage and
# failure tolerance.
name: 'e2e test'

on:
  workflow_call:
    inputs:
      # Git ref of vllm-project/vllm to check out and build against.
      vllm:
        required: true
        type: string
      # Container image used by the single-card (a2b3) jobs.
      image:
        required: true
        type: string
      # Test scope selector; jobs gate on 'light' or 'full'.
      type:
        required: true
        type: string
      # Whether to run the Ascend 310p jobs.
      contains_310:
        required: true
        type: boolean
      # When true, suites keep running past individual test failures.
      continue_on_error:
        required: false
        type: boolean
        default: false

# Shared uv/pip index configuration for every job.
# Numeric flags are quoted so YAML keeps them as literal strings
# instead of coercing them to integers.
env:
  UV_INDEX_URL: http://cache-service.nginx-pypi-cache.svc.cluster.local/pypi/simple
  UV_EXTRA_INDEX_URL: https://mirrors.huaweicloud.com/ascend/repos/pypi
  UV_INDEX_STRATEGY: unsafe-best-match
  UV_NO_CACHE: "1"
  UV_SYSTEM_PYTHON: "1"

jobs:
e2e-light:
name: singlecard-light
if: ${{ inputs.type == 'light' }}
runs-on: linux-aarch64-a2b3-1
strategy:
fail-fast: false
matrix:
part: [0]
container:
image: ${{ inputs.image }}
env:
VLLM_LOGGING_LEVEL: ERROR
VLLM_USE_MODELSCOPE: True
HF_HUB_OFFLINE: 1
steps:
- name: Checkout vllm-project/vllm-ascend repo
uses: actions/checkout@v6
- name: Check npu and CANN info
run: |
npu-smi info
cat /usr/local/Ascend/ascend-toolkit/latest/"$(uname -i)"-linux/ascend_toolkit_install.info
- name: Config mirrors
run: |
sed -Ei 's@(ports|archive).ubuntu.com@cache-service.nginx-pypi-cache.svc.cluster.local:8081@g' /etc/apt/sources.list
pip config set global.index-url http://cache-service.nginx-pypi-cache.svc.cluster.local/pypi/simple
pip config set global.trusted-host cache-service.nginx-pypi-cache.svc.cluster.local
apt-get update -y
apt install git -y
git config --global --add safe.directory /__w/vllm-ascend/vllm-ascend
- name: Install system dependencies
run: |
apt-get -y install `cat packages.txt`
apt-get -y install gcc g++ cmake libnuma-dev clang-15
update-alternatives --install /usr/bin/clang clang /usr/bin/clang-15 20
update-alternatives --install /usr/bin/clang++ clang++ /usr/bin/clang++-15 20
pip install uv
- name: Checkout vllm-project/vllm repo
uses: actions/checkout@v6
with:
repository: vllm-project/vllm
ref: ${{ inputs.vllm }}
path: ./vllm-empty
fetch-depth: 1
- name: Install vllm-project/vllm from source
working-directory: ./vllm-empty
run: |
VLLM_TARGET_DEVICE=empty uv pip install -e .
uv pip uninstall triton
- name: Install vllm-project/vllm-ascend
env:
PIP_EXTRA_INDEX_URL: https://mirrors.huaweicloud.com/ascend/repos/pypi
run: |
pip install uc-manager
uv pip install -r requirements-dev.txt
uv pip install -v -e .
uv pip install https://vllm-ascend.obs.cn-north-4.myhuaweicloud.com/vllm-ascend/torch_npu-2.9.0.post1%2Bgitee7ba04-cp311-cp311-manylinux_2_28_aarch64.whl
uv pip install git+https://github.com/modelscope/modelscope.git@dbbcbf631fe6d10cc6446df2ad2fef24039fe7fe
- name: Run vllm-project/vllm-ascend test
env:
PYTORCH_NPU_ALLOC_CONF: max_split_size_mb:256
VLLM_WORKER_MULTIPROC_METHOD: spawn
shell: bash
run: |
set -o pipefail
if [ "${{ inputs.continue_on_error }}" = "true" ]; then
python3 .github/workflows/scripts/run_suite.py \
--suite e2e-singlecard-light \
--auto-partition-id "${{ matrix.part }}" \
--auto-partition-size 1 \
--auto-upgrade-estimated-times \
--continue-on-error \
2>&1 | tee /tmp/e2e-singlecard-light-part${{ matrix.part }}.log
else
python3 .github/workflows/scripts/run_suite.py \
--suite e2e-singlecard-light \
--auto-partition-id "${{ matrix.part }}" \
--auto-partition-size 1 \
2>&1 | tee /tmp/e2e-singlecard-light-part${{ matrix.part }}.log
fi
exit ${PIPESTATUS[0]}
- name: Summarize singlecard-light failure
if: ${{ always() }}
run: |
python3 .github/workflows/scripts/ci_log_summary.py \
--step-name "Run singlecard-light test" \
--log-file /tmp/e2e-singlecard-light-part${{ matrix.part }}.log \
--output "$GITHUB_STEP_SUMMARY"
- name: Upload timing data
uses: actions/upload-artifact@v4
if: ${{ inputs.continue_on_error == true && github.event_name != 'pull_request' }}
with:
name: timing-data-singlecard-light-part${{ matrix.part }}
path: test_timing_data.json
if-no-files-found: warn
retention-days: 5
e2e-full:
name: singlecard-full
if: ${{ inputs.type == 'full' }}
runs-on: linux-aarch64-a2b3-1
strategy:
fail-fast: false
matrix:
part: [0, 1]
container:
image: ${{ inputs.image }}
env:
VLLM_LOGGING_LEVEL: ERROR
VLLM_USE_MODELSCOPE: True
HF_HUB_OFFLINE: 1
MODELSCOPE_HUB_FILE_LOCK: False
steps:
- name: Checkout vllm-project/vllm-ascend repo
uses: actions/checkout@v6
- name: Check npu and CANN info
run: |
npu-smi info
cat /usr/local/Ascend/ascend-toolkit/latest/"$(uname -i)"-linux/ascend_toolkit_install.info
- name: Config mirrors
run: |
sed -Ei 's@(ports|archive).ubuntu.com@cache-service.nginx-pypi-cache.svc.cluster.local:8081@g' /etc/apt/sources.list
pip config set global.index-url http://cache-service.nginx-pypi-cache.svc.cluster.local/pypi/simple
pip config set global.trusted-host cache-service.nginx-pypi-cache.svc.cluster.local
apt-get update -y
apt install git -y
git config --global --add safe.directory /__w/vllm-ascend/vllm-ascend
- name: Install system dependencies
run: |
apt-get -y install `cat packages.txt`
apt-get -y install gcc g++ cmake libnuma-dev clang-15
update-alternatives --install /usr/bin/clang clang /usr/bin/clang-15 20
update-alternatives --install /usr/bin/clang++ clang++ /usr/bin/clang++-15 20
pip install uv
- name: Checkout vllm-project/vllm repo
uses: actions/checkout@v6
with:
repository: vllm-project/vllm
ref: ${{ inputs.vllm }}
path: ./vllm-empty
fetch-depth: 1
- name: Install vllm-project/vllm from source
working-directory: ./vllm-empty
run: |
VLLM_TARGET_DEVICE=empty uv pip install -e .
uv pip uninstall triton
- name: Install vllm-project/vllm-ascend
env:
PIP_EXTRA_INDEX_URL: https://mirrors.huaweicloud.com/ascend/repos/pypi
run: |
pip install uc-manager
uv pip install -r requirements-dev.txt
uv pip install -v -e .
uv pip install https://vllm-ascend.obs.cn-north-4.myhuaweicloud.com/vllm-ascend/torch_npu-2.9.0.post1%2Bgitee7ba04-cp311-cp311-manylinux_2_28_aarch64.whl
uv pip install git+https://github.com/modelscope/modelscope.git@dbbcbf631fe6d10cc6446df2ad2fef24039fe7fe
- name: Run e2e test
env:
VLLM_WORKER_MULTIPROC_METHOD: spawn
PYTORCH_NPU_ALLOC_CONF: max_split_size_mb:256
shell: bash
run: |
set -o pipefail
if [ "${{ inputs.continue_on_error }}" = "true" ]; then
python3 .github/workflows/scripts/run_suite.py \
--suite e2e-singlecard \
--auto-partition-id "${{ matrix.part }}" \
--auto-partition-size 2 \
--auto-upgrade-estimated-times \
--continue-on-error \
2>&1 | tee /tmp/e2e-singlecard-full-part${{ matrix.part }}.log
else
python3 .github/workflows/scripts/run_suite.py \
--suite e2e-singlecard \
--auto-partition-id "${{ matrix.part }}" \
--auto-partition-size 2 \
2>&1 | tee /tmp/e2e-singlecard-full-part${{ matrix.part }}.log
fi
exit ${PIPESTATUS[0]}
- name: Summarize singlecard-full failure
if: ${{ always() }}
run: |
python3 .github/workflows/scripts/ci_log_summary.py \
--step-name "Run singlecard-full test" \
--log-file /tmp/e2e-singlecard-full-part${{ matrix.part }}.log \
--output "$GITHUB_STEP_SUMMARY"
- name: Upload timing data
uses: actions/upload-artifact@v4
if: ${{ inputs.continue_on_error == true && github.event_name != 'pull_request' }}
with:
name: timing-data-singlecard-full-part${{ matrix.part }}
path: test_timing_data.json
if-no-files-found: warn
retention-days: 5
e2e-2-cards-light:
name: multicard-2-light
if: ${{ inputs.type == 'light' }}
runs-on: linux-aarch64-a3-2
strategy:
fail-fast: false
matrix:
part: [0]
container:
image: swr.cn-southwest-2.myhuaweicloud.com/base_image/ascend-ci/cann:8.5.1-a3-ubuntu22.04-py3.11
env:
VLLM_LOGGING_LEVEL: ERROR
VLLM_USE_MODELSCOPE: True
HCCL_BUFFSIZE: 1024
HF_HUB_OFFLINE: 1
steps:
- name: Checkout vllm-project/vllm-ascend repo
uses: actions/checkout@v6
- name: Check npu and CANN info
run: |
npu-smi info
cat /usr/local/Ascend/ascend-toolkit/latest/"$(uname -i)"-linux/ascend_toolkit_install.info
- name: Config mirrors
run: |
sed -Ei 's@(ports|archive).ubuntu.com@cache-service.nginx-pypi-cache.svc.cluster.local:8081@g' /etc/apt/sources.list
pip config set global.index-url http://cache-service.nginx-pypi-cache.svc.cluster.local/pypi/simple
pip config set global.trusted-host cache-service.nginx-pypi-cache.svc.cluster.local
apt-get update -y
apt install git -y
git config --global --add safe.directory /__w/vllm-ascend/vllm-ascend
- name: Install system dependencies
run: |
apt-get -y install `cat packages.txt`
apt-get -y install gcc g++ cmake libnuma-dev clang-15
update-alternatives --install /usr/bin/clang clang /usr/bin/clang-15 20
update-alternatives --install /usr/bin/clang++ clang++ /usr/bin/clang++-15 20
pip install uv
- name: Checkout vllm-project/vllm repo
uses: actions/checkout@v6
with:
repository: vllm-project/vllm
ref: ${{ inputs.vllm }}
path: ./vllm-empty
fetch-depth: 1
- name: Install vllm-project/vllm from source
working-directory: ./vllm-empty
run: |
VLLM_TARGET_DEVICE=empty uv pip install -e .
uv pip uninstall triton
- name: Install vllm-project/vllm-ascend
env:
PIP_EXTRA_INDEX_URL: https://mirrors.huaweicloud.com/ascend/repos/pypi
run: |
pip install uc-manager
uv pip install -r requirements-dev.txt
uv pip install -v -e .
uv pip install https://vllm-ascend.obs.cn-north-4.myhuaweicloud.com/vllm-ascend/torch_npu-2.9.0.post1%2Bgitee7ba04-cp311-cp311-manylinux_2_28_aarch64.whl
uv pip install git+https://github.com/modelscope/modelscope.git@dbbcbf631fe6d10cc6446df2ad2fef24039fe7fe
- name: Run vllm-project/vllm-ascend test (light)
env:
VLLM_WORKER_MULTIPROC_METHOD: spawn
shell: bash
run: |
set -o pipefail
if [ "${{ inputs.continue_on_error }}" = "true" ]; then
python3 .github/workflows/scripts/run_suite.py \
--suite e2e-2card-light \
--auto-partition-id "${{ matrix.part }}" \
--auto-partition-size 1 \
--auto-upgrade-estimated-times \
--continue-on-error \
2>&1 | tee /tmp/e2e-2card-light-part${{ matrix.part }}.log
else
python3 .github/workflows/scripts/run_suite.py \
--suite e2e-2card-light \
--auto-partition-id "${{ matrix.part }}" \
--auto-partition-size 1 \
2>&1 | tee /tmp/e2e-2card-light-part${{ matrix.part }}.log
fi
exit ${PIPESTATUS[0]}
- name: Summarize multicard-2-light failure
if: ${{ always() }}
run: |
python3 .github/workflows/scripts/ci_log_summary.py \
--step-name "Run multicard-2-light test" \
--log-file /tmp/e2e-2card-light-part${{ matrix.part }}.log \
--output "$GITHUB_STEP_SUMMARY"
- name: Upload timing data
uses: actions/upload-artifact@v4
if: ${{ inputs.continue_on_error == true && github.event_name != 'pull_request' }}
with:
name: timing-data-2card-light-part${{ matrix.part }}
path: test_timing_data.json
if-no-files-found: warn
retention-days: 5
e2e-2-cards-full:
name: multicard-2-full
if: ${{ inputs.type == 'full' }}
runs-on: linux-aarch64-a3-2
strategy:
fail-fast: false
matrix:
part: [0]
container:
image: swr.cn-southwest-2.myhuaweicloud.com/base_image/ascend-ci/cann:8.5.1-a3-ubuntu22.04-py3.11
env:
VLLM_LOGGING_LEVEL: ERROR
VLLM_USE_MODELSCOPE: True
HCCL_BUFFSIZE: 1024
HF_HUB_OFFLINE: 1
steps:
- name: Checkout vllm-project/vllm-ascend repo
uses: actions/checkout@v6
- name: Check npu and CANN info
run: |
npu-smi info
cat /usr/local/Ascend/ascend-toolkit/latest/"$(uname -i)"-linux/ascend_toolkit_install.info
- name: Config mirrors
run: |
sed -Ei 's@(ports|archive).ubuntu.com@cache-service.nginx-pypi-cache.svc.cluster.local:8081@g' /etc/apt/sources.list
pip config set global.index-url http://cache-service.nginx-pypi-cache.svc.cluster.local/pypi/simple
pip config set global.trusted-host cache-service.nginx-pypi-cache.svc.cluster.local
apt-get update -y
apt install git -y
git config --global --add safe.directory /__w/vllm-ascend/vllm-ascend
- name: Install system dependencies
run: |
apt-get -y install `cat packages.txt`
apt-get -y install gcc g++ cmake libnuma-dev clang-15
update-alternatives --install /usr/bin/clang clang /usr/bin/clang-15 20
update-alternatives --install /usr/bin/clang++ clang++ /usr/bin/clang++-15 20
pip install uv
- name: Checkout vllm-project/vllm repo
uses: actions/checkout@v6
with:
repository: vllm-project/vllm
ref: ${{ inputs.vllm }}
path: ./vllm-empty
fetch-depth: 1
- name: Install vllm-project/vllm from source
working-directory: ./vllm-empty
run: |
VLLM_TARGET_DEVICE=empty uv pip install -e .
uv pip uninstall triton
- name: Install vllm-project/vllm-ascend
env:
PIP_EXTRA_INDEX_URL: https://mirrors.huaweicloud.com/ascend/repos/pypi
run: |
pip install uc-manager
uv pip install -r requirements-dev.txt
uv pip install -v -e .
uv pip install https://vllm-ascend.obs.cn-north-4.myhuaweicloud.com/vllm-ascend/torch_npu-2.9.0.post1%2Bgitee7ba04-cp311-cp311-manylinux_2_28_aarch64.whl
uv pip install git+https://github.com/modelscope/modelscope.git@dbbcbf631fe6d10cc6446df2ad2fef24039fe7fe
- name: Run vllm-project/vllm-ascend test (full)
env:
VLLM_WORKER_MULTIPROC_METHOD: spawn
shell: bash
run: |
set -o pipefail
if [ "${{ inputs.continue_on_error }}" = "true" ]; then
python3 .github/workflows/scripts/run_suite.py \
--suite e2e-multicard-2-cards \
--auto-partition-id "${{ matrix.part }}" \
--auto-partition-size 1 \
--auto-upgrade-estimated-times \
--continue-on-error \
2>&1 | tee /tmp/e2e-2card-full-part${{ matrix.part }}.log
else
python3 .github/workflows/scripts/run_suite.py \
--suite e2e-multicard-2-cards \
--auto-partition-id "${{ matrix.part }}" \
--auto-partition-size 1 \
2>&1 | tee /tmp/e2e-2card-full-part${{ matrix.part }}.log
fi
exit ${PIPESTATUS[0]}
- name: Summarize multicard-2-full failure
if: ${{ always() }}
run: |
python3 .github/workflows/scripts/ci_log_summary.py \
--step-name "Run multicard-2-full test " \
--log-file /tmp/e2e-2card-full-part${{ matrix.part }}.log \
--output "$GITHUB_STEP_SUMMARY"
- name: Upload timing data
uses: actions/upload-artifact@v4
if: ${{ inputs.continue_on_error == true && github.event_name != 'pull_request' }}
with:
name: timing-data-2card-full-part${{ matrix.part }}
path: test_timing_data.json
if-no-files-found: warn
retention-days: 5
- name: Run vllm-project/vllm-ascend test (non triton)
if: ${{ inputs.type == 'full' && matrix.part == 0 }}
env:
VLLM_WORKER_MULTIPROC_METHOD: spawn
shell: bash
run: |
set -o pipefail
python3 -m pip uninstall -y triton-ascend
pytest -sv --durations=0 tests/e2e/multicard/2-cards/test_aclgraph_capture_replay.py \
2>&1 | tee /tmp/e2e-non-triton.log
exit ${PIPESTATUS[0]}
- name: Summarize non-triton failure
if: ${{ always() && inputs.type == 'full' && matrix.part == 0 }}
run: |
python3 .github/workflows/scripts/ci_log_summary.py \
--step-name "Run multicard-2-full test (non triton)" \
--log-file /tmp/e2e-non-triton.log \
--output "$GITHUB_STEP_SUMMARY"
e2e-4-cards-full:
name: multicard-4-full
if: ${{ inputs.type == 'full' }}
runs-on: linux-aarch64-a3-4
strategy:
fail-fast: false
matrix:
part: [0]
container:
image: m.daocloud.io/quay.io/ascend/cann:8.5.1-a3-ubuntu22.04-py3.11
env:
VLLM_LOGGING_LEVEL: ERROR
VLLM_USE_MODELSCOPE: True
HF_HUB_OFFLINE: 1
steps:
- name: Checkout vllm-project/vllm-ascend repo
uses: actions/checkout@v6
- name: Check npu and CANN info
run: |
npu-smi info
cat /usr/local/Ascend/ascend-toolkit/latest/"$(uname -i)"-linux/ascend_toolkit_install.info
- name: Config mirrors
run: |
sed -Ei 's@(ports|archive).ubuntu.com@cache-service.nginx-pypi-cache.svc.cluster.local:8081@g' /etc/apt/sources.list
pip config set global.index-url http://cache-service.nginx-pypi-cache.svc.cluster.local/pypi/simple
pip config set global.trusted-host cache-service.nginx-pypi-cache.svc.cluster.local
apt-get update -y
apt install git -y
git config --global --add safe.directory /__w/vllm-ascend/vllm-ascend
- name: Install system dependencies
run: |
apt-get -y install `cat packages.txt`
apt-get -y install gcc g++ cmake libnuma-dev clang-15
update-alternatives --install /usr/bin/clang clang /usr/bin/clang-15 20
update-alternatives --install /usr/bin/clang++ clang++ /usr/bin/clang++-15 20
pip install uv
- name: Checkout vllm-project/vllm repo
uses: actions/checkout@v6
with:
repository: vllm-project/vllm
ref: ${{ inputs.vllm }}
path: ./vllm-empty
fetch-depth: 1
- name: Install vllm-project/vllm from source
working-directory: ./vllm-empty
run: |
VLLM_TARGET_DEVICE=empty uv pip install -e .
uv pip uninstall triton
- name: Install vllm-project/vllm-ascend
env:
PIP_EXTRA_INDEX_URL: https://mirrors.huaweicloud.com/ascend/repos/pypi
run: |
pip install uc-manager
uv pip install -r requirements-dev.txt
uv pip install -v -e .
uv pip install https://vllm-ascend.obs.cn-north-4.myhuaweicloud.com/vllm-ascend/torch_npu-2.9.0.post1%2Bgitee7ba04-cp311-cp311-manylinux_2_28_aarch64.whl
uv pip install git+https://github.com/modelscope/modelscope.git@dbbcbf631fe6d10cc6446df2ad2fef24039fe7fe
- name: Run vllm-project/vllm-ascend test for V1 Engine
env:
VLLM_WORKER_MULTIPROC_METHOD: spawn
shell: bash
run: |
set -o pipefail
if [ "${{ inputs.continue_on_error }}" = "true" ]; then
python3 .github/workflows/scripts/run_suite.py \
--suite e2e-multicard-4-cards \
--auto-partition-id "${{ matrix.part }}" \
--auto-partition-size 1 \
--auto-upgrade-estimated-times \
--continue-on-error \
2>&1 | tee /tmp/e2e-4card-full-part${{ matrix.part }}.log
else
python3 .github/workflows/scripts/run_suite.py \
--suite e2e-multicard-4-cards \
--auto-partition-id "${{ matrix.part }}" \
--auto-partition-size 1 \
2>&1 | tee /tmp/e2e-4card-full-part${{ matrix.part }}.log
fi
exit ${PIPESTATUS[0]}
- name: Summarize multicard-4-full failure
if: ${{ always() }}
run: |
python3 .github/workflows/scripts/ci_log_summary.py \
--step-name "Run vllm-project/vllm-ascend test for V1 Engine" \
--log-file /tmp/e2e-4card-full-part${{ matrix.part }}.log \
--output "$GITHUB_STEP_SUMMARY"
- name: Upload timing data
uses: actions/upload-artifact@v4
if: ${{ inputs.continue_on_error == true && github.event_name != 'pull_request' }}
with:
name: timing-data-4card-full-part${{ matrix.part }}
path: test_timing_data.json
if-no-files-found: warn
retention-days: 5
e2e_310p:
name: 310p singlecard
runs-on: linux-aarch64-310p-1
if: ${{ inputs.contains_310 }}
container:
image: swr.cn-southwest-2.myhuaweicloud.com/base_image/ascend-ci/cann:8.5.1-310p-ubuntu22.04-py3.11
env:
VLLM_LOGGING_LEVEL: ERROR
VLLM_USE_MODELSCOPE: True
HF_HUB_OFFLINE: 1
steps:
- name: Check npu and CANN info
run: |
npu-smi info
cat /usr/local/Ascend/ascend-toolkit/latest/"$(uname -i)"-linux/ascend_toolkit_install.info
- name: Config mirrors
run: |
sed -Ei 's@(ports|archive).ubuntu.com@cache-service.nginx-pypi-cache.svc.cluster.local:8081@g' /etc/apt/sources.list
pip config set global.index-url http://cache-service.nginx-pypi-cache.svc.cluster.local/pypi/simple
pip config set global.trusted-host cache-service.nginx-pypi-cache.svc.cluster.local
apt-get update -y
apt install git -y
git config --global --add safe.directory /__w/vllm-ascend/vllm-ascend
- name: Checkout vllm-project/vllm-ascend repo
uses: actions/checkout@v6
- name: Install system dependencies
run: |
apt-get -y install `cat packages.txt`
apt-get -y install gcc g++ cmake libnuma-dev
pip install uv
- name: Checkout vllm-project/vllm repo
uses: actions/checkout@v6
with:
repository: vllm-project/vllm
ref: ${{ inputs.vllm }}
path: ./vllm-empty
fetch-depth: 1
- name: Install vllm-project/vllm from source
working-directory: ./vllm-empty
run: |
VLLM_TARGET_DEVICE=empty uv pip install -e .
uv pip uninstall triton
- name: Install vllm-project/vllm-ascend
env:
PIP_EXTRA_INDEX_URL: https://mirrors.huaweicloud.com/ascend/repos/pypi
run: |
pip install uc-manager
uv pip install -r requirements-dev.txt
uv pip install -v -e .
uv pip install https://vllm-ascend.obs.cn-north-4.myhuaweicloud.com/vllm-ascend/torch_npu-2.9.0.post1%2Bgitee7ba04-cp311-cp311-manylinux_2_28_aarch64.whl
uv pip install git+https://github.com/modelscope/modelscope.git@dbbcbf631fe6d10cc6446df2ad2fef24039fe7fe
- name: Run vllm-project/vllm-ascend test
env:
PYTORCH_NPU_ALLOC_CONF: max_split_size_mb:256
VLLM_WORKER_MULTIPROC_METHOD: spawn
shell: bash
run: |
set -o pipefail
pytest -sv --durations=0 tests/e2e/310p/singlecard/test_dense_model_singlecard.py \
tests/e2e/310p/singlecard/test_vl_model_singlecard.py \
2>&1 | tee /tmp/e2e-310p-singlecard.log
exit ${PIPESTATUS[0]}
- name: Summarize 310p singlecard failure
if: ${{ always() && inputs.contains_310 }}
run: |
python3 .github/workflows/scripts/ci_log_summary.py \
--step-name "Run vllm-project/vllm-ascend test" \
--log-file /tmp/e2e-310p-singlecard.log \
--output "$GITHUB_STEP_SUMMARY"
e2e_310p-4cards:
name: 310p multicards 4cards
runs-on: linux-aarch64-310p-4
if: ${{ inputs.contains_310 }}
container:
image: swr.cn-southwest-2.myhuaweicloud.com/base_image/ascend-ci/cann:8.5.1-310p-ubuntu22.04-py3.11
env:
VLLM_LOGGING_LEVEL: ERROR
VLLM_USE_MODELSCOPE: True
HF_HUB_OFFLINE: 1
steps:
- name: Check npu and CANN info
run: |
npu-smi info
cat /usr/local/Ascend/ascend-toolkit/latest/"$(uname -i)"-linux/ascend_toolkit_install.info
- name: Config mirrors
run: |
sed -Ei 's@(ports|archive).ubuntu.com@cache-service.nginx-pypi-cache.svc.cluster.local:8081@g' /etc/apt/sources.list
pip config set global.index-url http://cache-service.nginx-pypi-cache.svc.cluster.local/pypi/simple
pip config set global.trusted-host cache-service.nginx-pypi-cache.svc.cluster.local
apt-get update -y
apt install git -y
git config --global --add safe.directory /__w/vllm-ascend/vllm-ascend
- name: Checkout vllm-project/vllm-ascend repo
uses: actions/checkout@v6
- name: Install system dependencies
run: |
apt-get -y install `cat packages.txt`
apt-get -y install gcc g++ cmake libnuma-dev
pip install uv
- name: Checkout vllm-project/vllm repo
uses: actions/checkout@v6
with:
repository: vllm-project/vllm
ref: ${{ inputs.vllm }}
path: ./vllm-empty
fetch-depth: 1
- name: Install vllm-project/vllm from source
working-directory: ./vllm-empty
run: |
VLLM_TARGET_DEVICE=empty uv pip install -e .
uv pip uninstall triton
- name: Install vllm-project/vllm-ascend
env:
PIP_EXTRA_INDEX_URL: https://mirrors.huaweicloud.com/ascend/repos/pypi
run: |
pip install uc-manager
uv pip install -r requirements-dev.txt
uv pip install -v -e .
uv pip install https://vllm-ascend.obs.cn-north-4.myhuaweicloud.com/vllm-ascend/torch_npu-2.9.0.post1%2Bgitee7ba04-cp311-cp311-manylinux_2_28_aarch64.whl
uv pip install git+https://github.com/modelscope/modelscope.git@dbbcbf631fe6d10cc6446df2ad2fef24039fe7fe
- name: Run vllm-project/vllm-ascend test
env:
PYTORCH_NPU_ALLOC_CONF: max_split_size_mb:256
VLLM_WORKER_MULTIPROC_METHOD: spawn
shell: bash
run: |
set -o pipefail
pytest -sv --durations=0 \
tests/e2e/310p/multicard/test_dense_model_multicard.py \
tests/e2e/310p/multicard/test_moe_model_multicard.py \
tests/e2e/310p/multicard/test_vl_model_multicard.py \
2>&1 | tee /tmp/e2e-310p-4cards.log
exit ${PIPESTATUS[0]}
- name: Summarize 310p multicards failure
if: ${{ always() && inputs.contains_310 }}
run: |
python3 .github/workflows/scripts/ci_log_summary.py \
--step-name "Run vllm-project/vllm-ascend test" \
--log-file /tmp/e2e-310p-4cards.log \
--output "$GITHUB_STEP_SUMMARY"