# CI workflow: runs SGLang test suites on Ascend NPU runners.
name: PR Test (Ascend NPU)

on:
  push:
    branches: [ main ]
    paths:
      - "python/**"
      - "scripts/ci/**"
      - "test/**"
      - ".github/workflows/pr-test-npu.yml"
  pull_request:
    branches: [ main ]
    paths:
      - "python/**"
      - "scripts/ci/**"
      - "test/**"
      - ".github/workflows/pr-test-npu.yml"
    # Re-run only on new commits or when a label is applied; combined with
    # the per-job `run-ci` label gate in the `if:` conditions below.
    types: [synchronize, labeled]
  workflow_dispatch:

# Cancel any in-flight run for the same ref when a newer one starts.
concurrency:
  group: pr-test-npu-${{ github.ref }}
  cancel-in-progress: true
|
jobs:
  # Single-NPU (910B) suite. For PRs, runs only when the `run-ci` label is set.
  per-commit-1-ascend-npu:
    if: github.event_name != 'pull_request' || contains(github.event.pull_request.labels.*.name, 'run-ci')
    runs-on: linux-arm64-npu-1
    container:
      image: swr.cn-southwest-2.myhuaweicloud.com/base_image/ascend-ci/cann:8.2.rc1-910b-ubuntu22.04-py3.11
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Install dependencies
        run: |
          # speed up by using infra cache services
          CACHING_URL="cache-service.nginx-pypi-cache.svc.cluster.local"
          sed -Ei "s@(ports|archive).ubuntu.com@${CACHING_URL}:8081@g" /etc/apt/sources.list
          pip config set global.index-url http://${CACHING_URL}/pypi/simple
          pip config set global.trusted-host ${CACHING_URL}

          bash scripts/ci/npu_ci_install_dependency.sh
          # copy required file from our daily cache
          cp ~/.cache/modelscope/hub/datasets/otavia/ShareGPT_Vicuna_unfiltered/ShareGPT_V3_unfiltered_cleaned_split.json /tmp
          # copy download through proxy
          curl -o /tmp/test.jsonl -L https://gh-proxy.test.osinfra.cn/https://raw.githubusercontent.com/openai/grade-school-math/master/grade_school_math/data/test.jsonl

      - name: Run test
        timeout-minutes: 60
        env:
          # Quoted so YAML keeps them as strings (env values are strings anyway).
          SGLANG_USE_MODELSCOPE: "true"
          SGLANG_IS_IN_CI: "true"
          HF_ENDPOINT: https://hf-mirror.com
          TORCH_EXTENSIONS_DIR: /tmp/torch_extensions
        run: |
          cd test/srt
          python3 run_suite.py --suite per-commit-1-ascend-npu
per-commit-2-ascend-npu:
|
|
if: github.event_name != 'pull_request' || contains(github.event.pull_request.labels.*.name, 'run-ci')
|
|
runs-on: linux-arm64-npu-2
|
|
container:
|
|
image: swr.cn-southwest-2.myhuaweicloud.com/base_image/ascend-ci/cann:8.2.rc1-910b-ubuntu22.04-py3.11
|
|
steps:
|
|
- name: Checkout code
|
|
uses: actions/checkout@v4
|
|
|
|
- name: Install dependencies
|
|
run: |
|
|
# speed up by using infra cache services
|
|
CACHING_URL="cache-service.nginx-pypi-cache.svc.cluster.local"
|
|
sed -Ei "s@(ports|archive).ubuntu.com@${CACHING_URL}:8081@g" /etc/apt/sources.list
|
|
pip config set global.index-url http://${CACHING_URL}/pypi/simple
|
|
pip config set global.trusted-host ${CACHING_URL}
|
|
|
|
bash scripts/ci/npu_ci_install_dependency.sh
|
|
# copy required file from our daily cache
|
|
cp ~/.cache/modelscope/hub/datasets/otavia/ShareGPT_Vicuna_unfiltered/ShareGPT_V3_unfiltered_cleaned_split.json /tmp
|
|
# copy download through proxy
|
|
curl -o /tmp/test.jsonl -L https://gh-proxy.test.osinfra.cn/https://raw.githubusercontent.com/openai/grade-school-math/master/grade_school_math/data/test.jsonl
|
|
|
|
- name: Run test
|
|
timeout-minutes: 90
|
|
env:
|
|
SGLANG_USE_MODELSCOPE: true
|
|
SGLANG_IS_IN_CI: true
|
|
HF_ENDPOINT: https://hf-mirror.com
|
|
TORCH_EXTENSIONS_DIR: /tmp/torch_extensions
|
|
run: |
|
|
cd test/srt
|
|
python3 run_suite.py --suite per-commit-2-ascend-npu
|
|
|
|
per-commit-4-ascend-npu:
|
|
if: github.event_name != 'pull_request' || contains(github.event.pull_request.labels.*.name, 'run-ci')
|
|
runs-on: linux-arm64-npu-4
|
|
container:
|
|
image: swr.cn-southwest-2.myhuaweicloud.com/base_image/ascend-ci/cann:8.2.rc1-910b-ubuntu22.04-py3.11
|
|
steps:
|
|
- name: Checkout code
|
|
uses: actions/checkout@v4
|
|
|
|
- name: Install dependencies
|
|
run: |
|
|
# speed up by using infra cache services
|
|
CACHING_URL="cache-service.nginx-pypi-cache.svc.cluster.local"
|
|
sed -Ei "s@(ports|archive).ubuntu.com@${CACHING_URL}:8081@g" /etc/apt/sources.list
|
|
pip config set global.index-url http://${CACHING_URL}/pypi/simple
|
|
pip config set global.trusted-host ${CACHING_URL}
|
|
|
|
bash scripts/ci/npu_ci_install_dependency.sh
|
|
# copy required file from our daily cache
|
|
cp ~/.cache/modelscope/hub/datasets/otavia/ShareGPT_Vicuna_unfiltered/ShareGPT_V3_unfiltered_cleaned_split.json /tmp
|
|
# copy download through proxy
|
|
curl -o /tmp/test.jsonl -L https://gh-proxy.test.osinfra.cn/https://raw.githubusercontent.com/openai/grade-school-math/master/grade_school_math/data/test.jsonl
|
|
|
|
- name: Run test
|
|
timeout-minutes: 120
|
|
env:
|
|
SGLANG_USE_MODELSCOPE: true
|
|
SGLANG_IS_IN_CI: true
|
|
HF_ENDPOINT: https://hf-mirror.com
|
|
TORCH_EXTENSIONS_DIR: /tmp/torch_extensions
|
|
run: |
|
|
cd test/srt
|
|
python3 run_suite.py --suite per-commit-4-ascend-npu --timeout-per-file 3600
|
|
|
|
per-commit-16-ascend-a3:
|
|
if: github.event_name != 'pull_request' || contains(github.event.pull_request.labels.*.name, 'run-ci')
|
|
runs-on: linux-aarch64-a3-16
|
|
container:
|
|
image: swr.cn-southwest-2.myhuaweicloud.com/base_image/ascend-ci/cann:8.2.rc1-a3-ubuntu22.04-py3.11
|
|
steps:
|
|
- name: Checkout code
|
|
uses: actions/checkout@v4
|
|
|
|
- name: Install dependencies
|
|
run: |
|
|
# speed up by using infra cache services
|
|
CACHING_URL="cache-service.nginx-pypi-cache.svc.cluster.local"
|
|
sed -Ei "s@(ports|archive).ubuntu.com@${CACHING_URL}:8081@g" /etc/apt/sources.list
|
|
pip config set global.index-url http://${CACHING_URL}/pypi/simple
|
|
pip config set global.trusted-host ${CACHING_URL}
|
|
|
|
bash scripts/ci/npu_ci_install_dependency.sh
|
|
# copy required file from our daily cache
|
|
cp ~/.cache/modelscope/hub/datasets/otavia/ShareGPT_Vicuna_unfiltered/ShareGPT_V3_unfiltered_cleaned_split.json /tmp
|
|
# copy download through proxy
|
|
curl -o /tmp/test.jsonl -L https://gh-proxy.test.osinfra.cn/https://raw.githubusercontent.com/openai/grade-school-math/master/grade_school_math/data/test.jsonl
|
|
|
|
- name: Run test
|
|
timeout-minutes: 90
|
|
env:
|
|
SGLANG_USE_MODELSCOPE: true
|
|
SGLANG_IS_IN_CI: true
|
|
HF_ENDPOINT: https://hf-mirror.com
|
|
TORCH_EXTENSIONS_DIR: /tmp/torch_extensions
|
|
run: |
|
|
cd test/srt
|
|
python3 run_suite.py --suite per-commit-16-ascend-a3 --timeout-per-file 5400
|