### What this PR does / why we need it?
1. For all parts of the current test module that download models via
ModelScope, add the `local_files_only` parameter to specify offline
mode; this ensures that CI will not fail due to network instability.
2. Install modelscope from a fixed commit until its next release
### Does this PR introduce _any_ user-facing change?
### How was this patch tested?
check if the env or arg `local_files_only` works
1) set the env:
```shell
export HF_HUB_OFFLINE=1
```
2) run the script
```python
from transformers import PretrainedConfig
import huggingface_hub
from modelscope.utils.hf_util import patch_hub
patch_hub()
model="Qwen/Qwen3-0.6B"
kwargs = {}
config_dict, _ = PretrainedConfig.get_config_dict(
model,
trust_remote_code=True,
local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE,
**kwargs,
)
print(config_dict)
```
it works well:
```shell
2026-03-06 06:40:12,546 - modelscope - WARNING - We can not confirm the cached file is for revision: master
The argument `trust_remote_code` is to be used with Auto classes. It has no effect here and is ignored.
{'architectures': ['Qwen3ForCausalLM'], 'attention_bias': False, 'attention_dropout': 0.0, 'bos_token_id': 151643, 'eos_token_id': 151645, 'head_dim': 128, 'hidden_act': 'silu', 'hidden_size': 1024, 'initializer_range': 0.02, 'intermediate_size': 3072, 'max_position_embeddings': 40960, 'max_window_layers': 28, 'model_type': 'qwen3', 'num_attention_heads': 16, 'num_hidden_layers': 28, 'num_key_value_heads': 8, 'rms_norm_eps': 1e-06, 'rope_scaling': None, 'rope_theta': 1000000, 'sliding_window': None, 'tie_word_embeddings': True, 'torch_dtype': 'bfloat16', 'transformers_version': '4.51.0', 'use_cache': True, 'use_sliding_window': False, 'vocab_size': 151936, '_commit_hash': None}
```
3) test that a model repo which is not cached locally fails fast when the env
`HF_HUB_OFFLINE` is set
```python
from transformers import PretrainedConfig
import huggingface_hub
from modelscope.utils.hf_util import patch_hub
patch_hub()
model="FireRedTeam/FireRed-OCR"
kwargs = {}
config_dict, _ = PretrainedConfig.get_config_dict(
model,
trust_remote_code=True,
local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE,
**kwargs,
)
print(config_dict)
```
and the result is as expected:
```shell
File "/workspace/demo.py", line 12, in <module>
config_dict, _ = PretrainedConfig.get_config_dict(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/python3.11.14/lib/python3.11/site-packages/modelscope/utils/hf_util/patcher.py", line 189, in patch_get_config_dict
model_dir = get_model_dir(pretrained_model_name_or_path,
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/python3.11.14/lib/python3.11/site-packages/modelscope/utils/hf_util/patcher.py", line 164, in get_model_dir
model_dir = snapshot_download(
^^^^^^^^^^^^^^^^^^
File "/usr/local/python3.11.14/lib/python3.11/site-packages/modelscope/hub/snapshot_download.py", line 137, in snapshot_download
return _snapshot_download(
^^^^^^^^^^^^^^^^^^^
File "/usr/local/python3.11.14/lib/python3.11/site-packages/modelscope/hub/snapshot_download.py", line 283, in _snapshot_download
raise ValueError(
ValueError: Cannot find the requested files in the cached path and outgoing traffic has been disabled. To enable look-ups and downloads online, set 'local_files_only' to False
```
- vLLM version: v0.16.0
- vLLM main:
15d76f74e2
---------
Signed-off-by: wangli <wangli858794774@gmail.com>
610 lines
22 KiB
YAML
610 lines
22 KiB
YAML
name: 'e2e test'

# Reusable workflow: invoked via workflow_call by the repository's CI entry
# points. Runs the vllm-ascend e2e suites on Ascend NPU runners.
on:
  workflow_call:
    inputs:
      # Git ref of vllm-project/vllm to build against.
      vllm:
        required: true
        type: string
      # Container image for the A2/B3 single-card jobs.
      image:
        required: true
        type: string
      # Suite selector: 'light' or 'full'.
      type:
        required: true
        type: string
      # Whether to also run the 310P jobs.
      contains_310:
        required: true
        type: boolean
      # When true, suites keep going after failures and upload timing data.
      continue_on_error:
        required: false
        type: boolean
        default: false

env:
  # In-cluster PyPI cache plus the Ascend mirror as a fallback index.
  UV_INDEX_URL: http://cache-service.nginx-pypi-cache.svc.cluster.local/pypi/simple
  UV_EXTRA_INDEX_URL: https://mirrors.huaweicloud.com/ascend/repos/pypi
  UV_INDEX_STRATEGY: unsafe-best-match
  UV_NO_CACHE: 1
  UV_SYSTEM_PYTHON: 1
|
jobs:
  e2e-light:
    name: singlecard-light
    if: ${{ inputs.type == 'light' }}
    runs-on: linux-aarch64-a2b3-1
    strategy:
      fail-fast: false
      matrix:
        part: [0]
    container:
      image: ${{ inputs.image }}
      env:
        VLLM_LOGGING_LEVEL: ERROR
        # Quoted: bare True/1 are YAML booleans/ints; consumers expect strings.
        VLLM_USE_MODELSCOPE: "True"
        # Force offline mode so tests never hit the network for cached models.
        HF_HUB_OFFLINE: "1"
    steps:
      - name: Checkout vllm-project/vllm-ascend repo
        uses: actions/checkout@v6

      - name: Check npu and CANN info
        run: |
          npu-smi info
          cat /usr/local/Ascend/ascend-toolkit/latest/"$(uname -i)"-linux/ascend_toolkit_install.info

      - name: Config mirrors
        run: |
          sed -Ei 's@(ports|archive).ubuntu.com@cache-service.nginx-pypi-cache.svc.cluster.local:8081@g' /etc/apt/sources.list
          pip config set global.index-url http://cache-service.nginx-pypi-cache.svc.cluster.local/pypi/simple
          pip config set global.trusted-host cache-service.nginx-pypi-cache.svc.cluster.local
          apt-get update -y
          apt install git -y

      - name: Install system dependencies
        run: |
          apt-get -y install `cat packages.txt`
          apt-get -y install gcc g++ cmake libnuma-dev clang-15

          update-alternatives --install /usr/bin/clang clang /usr/bin/clang-15 20
          update-alternatives --install /usr/bin/clang++ clang++ /usr/bin/clang++-15 20
          pip install uv

      - name: Checkout vllm-project/vllm repo
        uses: actions/checkout@v6
        with:
          repository: vllm-project/vllm
          ref: ${{ inputs.vllm }}
          path: ./vllm-empty
          fetch-depth: 1

      - name: Install vllm-project/vllm from source
        working-directory: ./vllm-empty
        run: |
          VLLM_TARGET_DEVICE=empty uv pip install -e .

      - name: Install vllm-project/vllm-ascend
        env:
          PIP_EXTRA_INDEX_URL: https://mirrors.huaweicloud.com/ascend/repos/pypi
        run: |
          pip install uc-manager
          uv pip install -r requirements-dev.txt
          uv pip install -v -e .
          # Pin modelscope to a fixed commit (offline-mode fix) until its next release.
          uv pip install git+https://github.com/modelscope/modelscope.git@dbbcbf631fe6d10cc6446df2ad2fef24039fe7fe

      - name: Run vllm-project/vllm-ascend test
        env:
          # Quoted: value contains a bare ':'.
          PYTORCH_NPU_ALLOC_CONF: "max_split_size_mb:256"
          VLLM_WORKER_MULTIPROC_METHOD: spawn
        run: |
          if [ "${{ inputs.continue_on_error }}" = "true" ]; then
            python3 .github/workflows/scripts/run_suite.py \
              --suite e2e-singlecard-light \
              --auto-partition-id "${{ matrix.part }}" \
              --auto-partition-size 1 \
              --auto-upgrade-estimated-times \
              --continue-on-error
          else
            python3 .github/workflows/scripts/run_suite.py \
              --suite e2e-singlecard-light \
              --auto-partition-id "${{ matrix.part }}" \
              --auto-partition-size 1
          fi

      - name: Upload timing data
        uses: actions/upload-artifact@v4
        if: ${{ inputs.continue_on_error == true }}
        with:
          name: timing-data-singlecard-light-part${{ matrix.part }}
          path: test_timing_data.json
          if-no-files-found: warn
          retention-days: 5
|
e2e-full:
|
|
name: singlecard-full
|
|
if: ${{ inputs.type == 'full' }}
|
|
runs-on: linux-aarch64-a2b3-1
|
|
strategy:
|
|
fail-fast: false
|
|
matrix:
|
|
part: [0, 1]
|
|
container:
|
|
image: ${{ inputs.image }}
|
|
env:
|
|
VLLM_LOGGING_LEVEL: ERROR
|
|
VLLM_USE_MODELSCOPE: True
|
|
HF_HUB_OFFLINE: 1
|
|
MODELSCOPE_HUB_FILE_LOCK: False
|
|
steps:
|
|
- name: Checkout vllm-project/vllm-ascend repo
|
|
uses: actions/checkout@v6
|
|
|
|
- name: Check npu and CANN info
|
|
run: |
|
|
npu-smi info
|
|
cat /usr/local/Ascend/ascend-toolkit/latest/"$(uname -i)"-linux/ascend_toolkit_install.info
|
|
|
|
- name: Config mirrors
|
|
run: |
|
|
sed -Ei 's@(ports|archive).ubuntu.com@cache-service.nginx-pypi-cache.svc.cluster.local:8081@g' /etc/apt/sources.list
|
|
pip config set global.index-url http://cache-service.nginx-pypi-cache.svc.cluster.local/pypi/simple
|
|
pip config set global.trusted-host cache-service.nginx-pypi-cache.svc.cluster.local
|
|
apt-get update -y
|
|
apt install git -y
|
|
|
|
- name: Install system dependencies
|
|
run: |
|
|
apt-get -y install `cat packages.txt`
|
|
apt-get -y install gcc g++ cmake libnuma-dev clang-15
|
|
|
|
update-alternatives --install /usr/bin/clang clang /usr/bin/clang-15 20
|
|
update-alternatives --install /usr/bin/clang++ clang++ /usr/bin/clang++-15 20
|
|
pip install uv
|
|
|
|
- name: Checkout vllm-project/vllm repo
|
|
uses: actions/checkout@v6
|
|
with:
|
|
repository: vllm-project/vllm
|
|
ref: ${{ inputs.vllm }}
|
|
path: ./vllm-empty
|
|
fetch-depth: 1
|
|
|
|
- name: Install vllm-project/vllm from source
|
|
working-directory: ./vllm-empty
|
|
run: |
|
|
VLLM_TARGET_DEVICE=empty uv pip install -e .
|
|
|
|
- name: Install vllm-project/vllm-ascend
|
|
env:
|
|
PIP_EXTRA_INDEX_URL: https://mirrors.huaweicloud.com/ascend/repos/pypi
|
|
run: |
|
|
pip install uc-manager
|
|
uv pip install -r requirements-dev.txt
|
|
uv pip install -v -e .
|
|
uv pip install git+https://github.com/modelscope/modelscope.git@dbbcbf631fe6d10cc6446df2ad2fef24039fe7fe
|
|
- name: Run e2e test
|
|
env:
|
|
VLLM_WORKER_MULTIPROC_METHOD: spawn
|
|
PYTORCH_NPU_ALLOC_CONF: max_split_size_mb:256
|
|
run: |
|
|
if [ "${{ inputs.continue_on_error }}" = "true" ]; then
|
|
python3 .github/workflows/scripts/run_suite.py \
|
|
--suite e2e-singlecard \
|
|
--auto-partition-id "${{ matrix.part }}" \
|
|
--auto-partition-size 2 \
|
|
--auto-upgrade-estimated-times \
|
|
--continue-on-error
|
|
else
|
|
python3 .github/workflows/scripts/run_suite.py \
|
|
--suite e2e-singlecard \
|
|
--auto-partition-id "${{ matrix.part }}" \
|
|
--auto-partition-size 2
|
|
fi
|
|
|
|
- name: Upload timing data
|
|
uses: actions/upload-artifact@v4
|
|
if: ${{ inputs.continue_on_error == true }}
|
|
with:
|
|
name: timing-data-singlecard-full-part${{ matrix.part }}
|
|
path: test_timing_data.json
|
|
if-no-files-found: warn
|
|
retention-days: 5
|
|
|
|
e2e-2-cards-light:
|
|
name: multicard-2-light
|
|
if: ${{ inputs.type == 'light' }}
|
|
runs-on: linux-aarch64-a3-2
|
|
strategy:
|
|
fail-fast: false
|
|
matrix:
|
|
part: [0]
|
|
container:
|
|
image: swr.cn-southwest-2.myhuaweicloud.com/base_image/ascend-ci/cann:8.5.1-a3-ubuntu22.04-py3.11
|
|
env:
|
|
VLLM_LOGGING_LEVEL: ERROR
|
|
VLLM_USE_MODELSCOPE: True
|
|
HCCL_BUFFSIZE: 1024
|
|
HF_HUB_OFFLINE: 1
|
|
steps:
|
|
- name: Checkout vllm-project/vllm-ascend repo
|
|
uses: actions/checkout@v6
|
|
- name: Check npu and CANN info
|
|
run: |
|
|
npu-smi info
|
|
cat /usr/local/Ascend/ascend-toolkit/latest/"$(uname -i)"-linux/ascend_toolkit_install.info
|
|
|
|
- name: Config mirrors
|
|
run: |
|
|
sed -Ei 's@(ports|archive).ubuntu.com@cache-service.nginx-pypi-cache.svc.cluster.local:8081@g' /etc/apt/sources.list
|
|
pip config set global.index-url http://cache-service.nginx-pypi-cache.svc.cluster.local/pypi/simple
|
|
pip config set global.trusted-host cache-service.nginx-pypi-cache.svc.cluster.local
|
|
apt-get update -y
|
|
apt install git -y
|
|
|
|
- name: Install system dependencies
|
|
run: |
|
|
apt-get -y install `cat packages.txt`
|
|
apt-get -y install gcc g++ cmake libnuma-dev clang-15
|
|
|
|
update-alternatives --install /usr/bin/clang clang /usr/bin/clang-15 20
|
|
update-alternatives --install /usr/bin/clang++ clang++ /usr/bin/clang++-15 20
|
|
pip install uv
|
|
|
|
- name: Checkout vllm-project/vllm repo
|
|
uses: actions/checkout@v6
|
|
with:
|
|
repository: vllm-project/vllm
|
|
ref: ${{ inputs.vllm }}
|
|
path: ./vllm-empty
|
|
fetch-depth: 1
|
|
|
|
- name: Install vllm-project/vllm from source
|
|
working-directory: ./vllm-empty
|
|
run: |
|
|
VLLM_TARGET_DEVICE=empty uv pip install -e .
|
|
|
|
- name: Install vllm-project/vllm-ascend
|
|
env:
|
|
PIP_EXTRA_INDEX_URL: https://mirrors.huaweicloud.com/ascend/repos/pypi
|
|
run: |
|
|
pip install uc-manager
|
|
uv pip install -r requirements-dev.txt
|
|
uv pip install -v -e .
|
|
uv pip install git+https://github.com/modelscope/modelscope.git@dbbcbf631fe6d10cc6446df2ad2fef24039fe7fe
|
|
- name: Run vllm-project/vllm-ascend test (light)
|
|
env:
|
|
VLLM_WORKER_MULTIPROC_METHOD: spawn
|
|
run: |
|
|
if [ "${{ inputs.continue_on_error }}" = "true" ]; then
|
|
python3 .github/workflows/scripts/run_suite.py \
|
|
--suite e2e-2card-light \
|
|
--auto-partition-id "${{ matrix.part }}" \
|
|
--auto-partition-size 1 \
|
|
--auto-upgrade-estimated-times \
|
|
--continue-on-error
|
|
else
|
|
python3 .github/workflows/scripts/run_suite.py \
|
|
--suite e2e-2card-light \
|
|
--auto-partition-id "${{ matrix.part }}" \
|
|
--auto-partition-size 1
|
|
fi
|
|
|
|
|
|
- name: Upload timing data
|
|
uses: actions/upload-artifact@v4
|
|
if: ${{ inputs.continue_on_error == true }}
|
|
with:
|
|
name: timing-data-2card-light-part${{ matrix.part }}
|
|
path: test_timing_data.json
|
|
if-no-files-found: warn
|
|
retention-days: 5
|
|
|
|
e2e-2-cards-full:
|
|
name: multicard-2-full
|
|
if: ${{ inputs.type == 'full' }}
|
|
runs-on: linux-aarch64-a3-2
|
|
strategy:
|
|
fail-fast: false
|
|
matrix:
|
|
part: [0]
|
|
container:
|
|
image: swr.cn-southwest-2.myhuaweicloud.com/base_image/ascend-ci/cann:8.5.1-a3-ubuntu22.04-py3.11
|
|
env:
|
|
VLLM_LOGGING_LEVEL: ERROR
|
|
VLLM_USE_MODELSCOPE: True
|
|
HCCL_BUFFSIZE: 1024
|
|
HF_HUB_OFFLINE: 1
|
|
steps:
|
|
- name: Checkout vllm-project/vllm-ascend repo
|
|
uses: actions/checkout@v6
|
|
- name: Check npu and CANN info
|
|
run: |
|
|
npu-smi info
|
|
cat /usr/local/Ascend/ascend-toolkit/latest/"$(uname -i)"-linux/ascend_toolkit_install.info
|
|
|
|
- name: Config mirrors
|
|
run: |
|
|
sed -Ei 's@(ports|archive).ubuntu.com@cache-service.nginx-pypi-cache.svc.cluster.local:8081@g' /etc/apt/sources.list
|
|
pip config set global.index-url http://cache-service.nginx-pypi-cache.svc.cluster.local/pypi/simple
|
|
pip config set global.trusted-host cache-service.nginx-pypi-cache.svc.cluster.local
|
|
apt-get update -y
|
|
apt install git -y
|
|
|
|
- name: Install system dependencies
|
|
run: |
|
|
apt-get -y install `cat packages.txt`
|
|
apt-get -y install gcc g++ cmake libnuma-dev clang-15
|
|
|
|
update-alternatives --install /usr/bin/clang clang /usr/bin/clang-15 20
|
|
update-alternatives --install /usr/bin/clang++ clang++ /usr/bin/clang++-15 20
|
|
pip install uv
|
|
|
|
- name: Checkout vllm-project/vllm repo
|
|
uses: actions/checkout@v6
|
|
with:
|
|
repository: vllm-project/vllm
|
|
ref: ${{ inputs.vllm }}
|
|
path: ./vllm-empty
|
|
fetch-depth: 1
|
|
|
|
- name: Install vllm-project/vllm from source
|
|
working-directory: ./vllm-empty
|
|
run: |
|
|
VLLM_TARGET_DEVICE=empty uv pip install -e .
|
|
|
|
- name: Install vllm-project/vllm-ascend
|
|
env:
|
|
PIP_EXTRA_INDEX_URL: https://mirrors.huaweicloud.com/ascend/repos/pypi
|
|
run: |
|
|
pip install uc-manager
|
|
uv pip install -r requirements-dev.txt
|
|
uv pip install -v -e .
|
|
uv pip install git+https://github.com/modelscope/modelscope.git@dbbcbf631fe6d10cc6446df2ad2fef24039fe7fe
|
|
- name: Run vllm-project/vllm-ascend test (full)
|
|
env:
|
|
VLLM_WORKER_MULTIPROC_METHOD: spawn
|
|
run: |
|
|
if [ "${{ inputs.continue_on_error }}" = "true" ]; then
|
|
python3 .github/workflows/scripts/run_suite.py \
|
|
--suite e2e-multicard-2-cards \
|
|
--auto-partition-id "${{ matrix.part }}" \
|
|
--auto-partition-size 1 \
|
|
--auto-upgrade-estimated-times \
|
|
--continue-on-error
|
|
else
|
|
python3 .github/workflows/scripts/run_suite.py \
|
|
--suite e2e-multicard-2-cards \
|
|
--auto-partition-id "${{ matrix.part }}" \
|
|
--auto-partition-size 1
|
|
fi
|
|
|
|
|
|
- name: Upload timing data
|
|
uses: actions/upload-artifact@v4
|
|
if: ${{ inputs.continue_on_error == true }}
|
|
with:
|
|
name: timing-data-2card-full-part${{ matrix.part }}
|
|
path: test_timing_data.json
|
|
if-no-files-found: warn
|
|
retention-days: 5
|
|
|
|
- name: Run vllm-project/vllm-ascend test (non triton)
|
|
if: ${{ inputs.type == 'full' && matrix.part == 0 }}
|
|
env:
|
|
VLLM_WORKER_MULTIPROC_METHOD: spawn
|
|
run: |
|
|
python3 -m pip uninstall -y triton-ascend
|
|
pytest -sv --durations=0 tests/e2e/multicard/2-cards/test_aclgraph_capture_replay.py
|
|
|
|
e2e-4-cards-full:
|
|
name: multicard-4-full
|
|
if: ${{ inputs.type == 'full' }}
|
|
runs-on: linux-aarch64-a3-4
|
|
strategy:
|
|
fail-fast: false
|
|
matrix:
|
|
part: [0]
|
|
container:
|
|
image: m.daocloud.io/quay.io/ascend/cann:8.5.1-a3-ubuntu22.04-py3.11
|
|
env:
|
|
VLLM_LOGGING_LEVEL: ERROR
|
|
VLLM_USE_MODELSCOPE: True
|
|
HF_HUB_OFFLINE: 1
|
|
steps:
|
|
- name: Checkout vllm-project/vllm-ascend repo
|
|
uses: actions/checkout@v6
|
|
- name: Check npu and CANN info
|
|
run: |
|
|
npu-smi info
|
|
cat /usr/local/Ascend/ascend-toolkit/latest/"$(uname -i)"-linux/ascend_toolkit_install.info
|
|
|
|
- name: Config mirrors
|
|
run: |
|
|
sed -Ei 's@(ports|archive).ubuntu.com@cache-service.nginx-pypi-cache.svc.cluster.local:8081@g' /etc/apt/sources.list
|
|
pip config set global.index-url http://cache-service.nginx-pypi-cache.svc.cluster.local/pypi/simple
|
|
pip config set global.trusted-host cache-service.nginx-pypi-cache.svc.cluster.local
|
|
apt-get update -y
|
|
apt install git -y
|
|
|
|
- name: Install system dependencies
|
|
run: |
|
|
apt-get -y install `cat packages.txt`
|
|
apt-get -y install gcc g++ cmake libnuma-dev clang-15
|
|
|
|
update-alternatives --install /usr/bin/clang clang /usr/bin/clang-15 20
|
|
update-alternatives --install /usr/bin/clang++ clang++ /usr/bin/clang++-15 20
|
|
pip install uv
|
|
|
|
- name: Checkout vllm-project/vllm repo
|
|
uses: actions/checkout@v6
|
|
with:
|
|
repository: vllm-project/vllm
|
|
ref: ${{ inputs.vllm }}
|
|
path: ./vllm-empty
|
|
fetch-depth: 1
|
|
|
|
- name: Install vllm-project/vllm from source
|
|
working-directory: ./vllm-empty
|
|
run: |
|
|
VLLM_TARGET_DEVICE=empty uv pip install -e .
|
|
|
|
- name: Install vllm-project/vllm-ascend
|
|
env:
|
|
PIP_EXTRA_INDEX_URL: https://mirrors.huaweicloud.com/ascend/repos/pypi
|
|
run: |
|
|
pip install uc-manager
|
|
uv pip install -r requirements-dev.txt
|
|
uv pip install -v -e .
|
|
uv pip install git+https://github.com/modelscope/modelscope.git@dbbcbf631fe6d10cc6446df2ad2fef24039fe7fe
|
|
|
|
- name: Run vllm-project/vllm-ascend test for V1 Engine
|
|
env:
|
|
VLLM_WORKER_MULTIPROC_METHOD: spawn
|
|
run: |
|
|
if [ "${{ inputs.continue_on_error }}" = "true" ]; then
|
|
python3 .github/workflows/scripts/run_suite.py \
|
|
--suite e2e-multicard-4-cards \
|
|
--auto-partition-id "${{ matrix.part }}" \
|
|
--auto-partition-size 1 \
|
|
--auto-upgrade-estimated-times \
|
|
--continue-on-error
|
|
else
|
|
python3 .github/workflows/scripts/run_suite.py \
|
|
--suite e2e-multicard-4-cards \
|
|
--auto-partition-id "${{ matrix.part }}" \
|
|
--auto-partition-size 1
|
|
fi
|
|
|
|
|
|
- name: Upload timing data
|
|
uses: actions/upload-artifact@v4
|
|
if: ${{ inputs.continue_on_error == true }}
|
|
with:
|
|
name: timing-data-4card-full-part${{ matrix.part }}
|
|
path: test_timing_data.json
|
|
if-no-files-found: warn
|
|
retention-days: 5
|
|
|
|
e2e_310p:
|
|
name: 310p singlecard
|
|
runs-on: linux-aarch64-310p-1
|
|
if: ${{ inputs.contains_310 }}
|
|
container:
|
|
image: swr.cn-southwest-2.myhuaweicloud.com/base_image/ascend-ci/cann:8.5.1-310p-ubuntu22.04-py3.11
|
|
env:
|
|
VLLM_LOGGING_LEVEL: ERROR
|
|
VLLM_USE_MODELSCOPE: True
|
|
HF_HUB_OFFLINE: 1
|
|
steps:
|
|
- name: Check npu and CANN info
|
|
run: |
|
|
npu-smi info
|
|
cat /usr/local/Ascend/ascend-toolkit/latest/"$(uname -i)"-linux/ascend_toolkit_install.info
|
|
- name: Config mirrors
|
|
run: |
|
|
sed -Ei 's@(ports|archive).ubuntu.com@cache-service.nginx-pypi-cache.svc.cluster.local:8081@g' /etc/apt/sources.list
|
|
pip config set global.index-url http://cache-service.nginx-pypi-cache.svc.cluster.local/pypi/simple
|
|
pip config set global.trusted-host cache-service.nginx-pypi-cache.svc.cluster.local
|
|
apt-get update -y
|
|
apt install git -y
|
|
|
|
- name: Checkout vllm-project/vllm-ascend repo
|
|
uses: actions/checkout@v6
|
|
|
|
- name: Install system dependencies
|
|
run: |
|
|
apt-get -y install `cat packages.txt`
|
|
apt-get -y install gcc g++ cmake libnuma-dev
|
|
pip install uv
|
|
|
|
- name: Checkout vllm-project/vllm repo
|
|
uses: actions/checkout@v6
|
|
with:
|
|
repository: vllm-project/vllm
|
|
ref: ${{ inputs.vllm }}
|
|
path: ./vllm-empty
|
|
fetch-depth: 1
|
|
|
|
- name: Install vllm-project/vllm from source
|
|
working-directory: ./vllm-empty
|
|
run: |
|
|
VLLM_TARGET_DEVICE=empty uv pip install -e .
|
|
|
|
- name: Install vllm-project/vllm-ascend
|
|
env:
|
|
PIP_EXTRA_INDEX_URL: https://mirrors.huaweicloud.com/ascend/repos/pypi
|
|
run: |
|
|
pip install uc-manager
|
|
uv pip install -r requirements-dev.txt
|
|
uv pip install -v -e .
|
|
uv pip install git+https://github.com/modelscope/modelscope.git@dbbcbf631fe6d10cc6446df2ad2fef24039fe7fe
|
|
|
|
- name: Run vllm-project/vllm-ascend test
|
|
env:
|
|
PYTORCH_NPU_ALLOC_CONF: max_split_size_mb:256
|
|
VLLM_WORKER_MULTIPROC_METHOD: spawn
|
|
run: |
|
|
pytest -sv --durations=0 tests/e2e/310p/singlecard/test_dense_model_singlecard.py \
|
|
tests/e2e/310p/singlecard/test_vl_model_singlecard.py
|
|
|
|
e2e_310p-4cards:
|
|
name: 310p multicards 4cards
|
|
runs-on: linux-aarch64-310p-4
|
|
if: ${{ inputs.contains_310 }}
|
|
container:
|
|
image: swr.cn-southwest-2.myhuaweicloud.com/base_image/ascend-ci/cann:8.5.1-310p-ubuntu22.04-py3.11
|
|
env:
|
|
VLLM_LOGGING_LEVEL: ERROR
|
|
VLLM_USE_MODELSCOPE: True
|
|
HF_HUB_OFFLINE: 1
|
|
steps:
|
|
- name: Check npu and CANN info
|
|
run: |
|
|
npu-smi info
|
|
cat /usr/local/Ascend/ascend-toolkit/latest/"$(uname -i)"-linux/ascend_toolkit_install.info
|
|
- name: Config mirrors
|
|
run: |
|
|
sed -Ei 's@(ports|archive).ubuntu.com@cache-service.nginx-pypi-cache.svc.cluster.local:8081@g' /etc/apt/sources.list
|
|
pip config set global.index-url http://cache-service.nginx-pypi-cache.svc.cluster.local/pypi/simple
|
|
pip config set global.trusted-host cache-service.nginx-pypi-cache.svc.cluster.local
|
|
apt-get update -y
|
|
apt install git -y
|
|
|
|
- name: Checkout vllm-project/vllm-ascend repo
|
|
uses: actions/checkout@v6
|
|
|
|
- name: Install system dependencies
|
|
run: |
|
|
apt-get -y install `cat packages.txt`
|
|
apt-get -y install gcc g++ cmake libnuma-dev
|
|
pip install uv
|
|
|
|
- name: Checkout vllm-project/vllm repo
|
|
uses: actions/checkout@v6
|
|
with:
|
|
repository: vllm-project/vllm
|
|
ref: ${{ inputs.vllm }}
|
|
path: ./vllm-empty
|
|
fetch-depth: 1
|
|
|
|
- name: Install vllm-project/vllm from source
|
|
working-directory: ./vllm-empty
|
|
run: |
|
|
VLLM_TARGET_DEVICE=empty uv pip install -e .
|
|
|
|
- name: Install vllm-project/vllm-ascend
|
|
env:
|
|
PIP_EXTRA_INDEX_URL: https://mirrors.huaweicloud.com/ascend/repos/pypi
|
|
run: |
|
|
pip install uc-manager
|
|
uv pip install -r requirements-dev.txt
|
|
uv pip install -v -e .
|
|
uv pip install git+https://github.com/modelscope/modelscope.git@dbbcbf631fe6d10cc6446df2ad2fef24039fe7fe
|
|
|
|
- name: Run vllm-project/vllm-ascend test
|
|
env:
|
|
PYTORCH_NPU_ALLOC_CONF: max_split_size_mb:256
|
|
VLLM_WORKER_MULTIPROC_METHOD: spawn
|
|
run: |
|
|
pytest -sv --durations=0 \
|
|
tests/e2e/310p/multicard/test_dense_model_multicard.py \
|
|
tests/e2e/310p/multicard/test_moe_model_multicard.py \
|
|
tests/e2e/310p/multicard/test_vl_model_multicard.py
|