From cd69385dab402a75d27800ec003cb73dcf0b44fa Mon Sep 17 00:00:00 2001 From: Yikun Jiang Date: Sun, 12 Oct 2025 17:27:50 +0800 Subject: [PATCH] Add models test and add several new models yaml (#3394) ### What this PR does / why we need it? This PR adds accuracy CI for several new models - `ascend test / accuracy` is for PR triggered check of popular models accuracy - `ascend test / models` is for accuracy report, full models test, nightly model test - Add Qwen2-Audio-7B-Instruct, Qwen2-VL-7B-Instruct, Qwen3-8B, Qwen3-VL-30B-A3B-Instruct ### Does this PR introduce _any_ user-facing change? No ### How was this patch tested? CI passed Closes: https://github.com/vllm-project/vllm-ascend/pull/2330 Closes: https://github.com/vllm-project/vllm-ascend/pull/3362 - vLLM version: v0.11.0rc3 - vLLM main: https://github.com/vllm-project/vllm/commit/v0.11.0 --------- Signed-off-by: hfadzxy Signed-off-by: Yikun Jiang Co-authored-by: hfadzxy --- .github/workflows/_accuracy_test.yaml | 175 ++++++++++ .github/workflows/accuracy_test.yaml | 312 ++---------------- .../workflows/vllm_ascend_test_models.yaml | 177 ++++++++++ .../configs/Qwen2-Audio-7B-Instruct.yaml | 10 + .../models/configs/Qwen2-VL-7B-Instruct.yaml | 10 + tests/e2e/models/configs/Qwen3-8B.yaml | 11 + .../configs/Qwen3-VL-30B-A3B-Instruct.yaml | 12 + tests/e2e/models/configs/accuracy.txt | 8 +- tests/e2e/models/test_lm_eval_correctness.py | 4 +- 9 files changed, 434 insertions(+), 285 deletions(-) create mode 100644 .github/workflows/_accuracy_test.yaml create mode 100644 .github/workflows/vllm_ascend_test_models.yaml create mode 100644 tests/e2e/models/configs/Qwen2-Audio-7B-Instruct.yaml create mode 100644 tests/e2e/models/configs/Qwen2-VL-7B-Instruct.yaml create mode 100644 tests/e2e/models/configs/Qwen3-8B.yaml create mode 100644 tests/e2e/models/configs/Qwen3-VL-30B-A3B-Instruct.yaml diff --git a/.github/workflows/_accuracy_test.yaml b/.github/workflows/_accuracy_test.yaml new file mode 100644 index 
0000000..4b4e199 --- /dev/null +++ b/.github/workflows/_accuracy_test.yaml @@ -0,0 +1,175 @@ +name: 'accuracy test' + +on: + workflow_call: + inputs: + vllm: + required: true + type: string + vllm-ascend: + required: false + type: string + default: main + runner: + required: true + type: string + image: + required: true + type: string + model_name: + required: true + type: string + upload: + required: false + type: boolean + default: false + +jobs: + accuracy_tests: + + runs-on: ${{ inputs.runner }} + name: ${{ inputs.model_name }} accuracy + container: + image: swr.cn-southwest-2.myhuaweicloud.com/base_image/ascend-ci/cann:8.2.rc1-910b-ubuntu22.04-py3.11 + env: + VLLM_USE_MODELSCOPE: True + # 1. If version specified (work_dispatch), do specified branch accuracy test + # 2. If no version (labeled PR), do accuracy test by default ref: + # The branch, tag or SHA to checkout. When checking out the repository that + # triggered a workflow, this defaults to the reference or SHA for that event. + # Otherwise, uses the default branch. 
+ GHA_VLLM_ASCEND_VERSION: ${{ inputs.vllm-ascend }} + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set model name as output + id: set_output + run: | + echo "model_name=${{ inputs.model_name }}" >> $GITHUB_OUTPUT + + - name: Config mirrors + run: | + sed -Ei 's@(ports|archive).ubuntu.com@cache-service.nginx-pypi-cache.svc.cluster.local:8081@g' /etc/apt/sources.list + pip config set global.index-url http://cache-service.nginx-pypi-cache.svc.cluster.local/pypi/simple + pip config set global.trusted-host cache-service.nginx-pypi-cache.svc.cluster.local + apt-get update -y + apt install git -y + + - name: Install system dependencies + run: | + apt-get -y install `cat packages.txt` + apt-get -y install gcc g++ cmake libnuma-dev + + - name: Checkout vllm-project/vllm repo + uses: actions/checkout@v4 + with: + repository: vllm-project/vllm + ref: ${{ inputs.vllm }} + path: ./vllm-empty + + - name: Install vllm-project/vllm from source + working-directory: ./vllm-empty + run: | + VLLM_TARGET_DEVICE=empty pip install -e . 
+ + - name: Resolve vllm-ascend version + run: | + VERSION_INPUT="${{ inputs.vllm-ascend }}" + + if [[ "$VERSION_INPUT" == "latest" ]]; then + TAGS=$(git ls-remote --tags --sort=-v:refname https://github.com/vllm-project/vllm-ascend "v*" | cut -f2 | sed 's|refs/tags/||') + LATEST_TAG=$(echo "$TAGS" | head -n1) + if [[ -z "$LATEST_TAG" ]]; then + RESOLVED_VERSION="main" + else + RESOLVED_VERSION="$LATEST_TAG" + fi + else + RESOLVED_VERSION="$VERSION_INPUT" + fi + echo "GHA_VLLM_ASCEND_VERSION=$RESOLVED_VERSION" >> $GITHUB_ENV + + - name: Checkout vllm-project/vllm-ascend repo + uses: actions/checkout@v4 + with: + repository: vllm-project/vllm-ascend + path: ./vllm-ascend + ref: ${{ env.GHA_VLLM_ASCEND_VERSION }} + + - name: Install vllm-project/vllm-ascend + working-directory: ./vllm-ascend + env: + PIP_EXTRA_INDEX_URL: https://mirrors.huaweicloud.com/ascend/repos/pypi + run: | + pip install -r requirements-dev.txt + pip install -v -e . + + - name: Get vLLM commit hash and URL + working-directory: ./vllm-empty + run: | + VLLM_COMMIT=$(git rev-parse --short=7 HEAD) + echo "VLLM_COMMIT=$VLLM_COMMIT" >> $GITHUB_ENV + + - name: Get vLLM-Ascend commit hash and URL + working-directory: ./vllm-ascend + run: | + VLLM_ASCEND_COMMIT=$(git rev-parse --short=7 HEAD) + echo "VLLM_ASCEND_COMMIT=$VLLM_ASCEND_COMMIT" >> $GITHUB_ENV + + - name: Collect version info + run: | + for dir in /usr/local/Ascend/ascend-toolkit/*; do + dname=$(basename "$dir") + if [ "$dname" != "latest" ]; then + TOOLKIT_DIR="$dname" + break + fi + done + INFO_FILE="/usr/local/Ascend/ascend-toolkit/${TOOLKIT_DIR}/$(uname -i)-linux/ascend_toolkit_install.info" + GHA_CANN_VERSION=$(grep "version=" "$INFO_FILE" \ + | head -n1 \ + | cut -d'=' -f2 \ + | tr -d '"') + { + echo "GHA_CANN_VERSION=$GHA_CANN_VERSION" + pip show torch | grep "Version:" | awk '{print "GHA_TORCH_VERSION="$2}' + pip show torch_npu | grep "Version:" | awk '{print "GHA_TORCH_NPU_VERSION="$2}' + pip show vllm | grep "Version:" | awk '{print 
"GHA_VLLM_VERSION="$2}' | sed 's/+.*//' + } >> "$GITHUB_ENV" + + - name: Run accuracy test + id: report + env: + VLLM_WORKER_MULTIPROC_METHOD: spawn + VLLM_USE_MODELSCOPE: True + VLLM_VERSION: ${{ env.GHA_VLLM_VERSION }} + VLLM_COMMIT: ${{ env.VLLM_COMMIT }} + VLLM_ASCEND_VERSION: ${{ env.GHA_VLLM_ASCEND_VERSION || github.ref }} + VLLM_ASCEND_COMMIT: ${{ env.VLLM_ASCEND_COMMIT }} + CANN_VERSION: ${{ env.GHA_CANN_VERSION }} + TORCH_VERSION: ${{ env.GHA_TORCH_VERSION }} + TORCH_NPU_VERSION: ${{ env.GHA_TORCH_NPU_VERSION }} + run: | + model_base_name=$(basename ${{ inputs.model_name }}) + markdown_name="${model_base_name}" + echo "markdown_name=$markdown_name" >> $GITHUB_OUTPUT + mkdir -p ./benchmarks/accuracy + pytest -sv ./tests/e2e/models/test_lm_eval_correctness.py \ + --config ./tests/e2e/models/configs/${{ inputs.model_name }}.yaml + + - name: Generate step summary + if: ${{ always() }} + run: | + cat ./benchmarks/accuracy/${{ steps.report.outputs.markdown_name }}.md >> $GITHUB_STEP_SUMMARY + + - name: Upload Report + if: ${{ inputs.upload == true }} + uses: actions/upload-artifact@v4 + with: + name: "report-${{ env.GHA_VLLM_ASCEND_VERSION }}-${{ steps.report.outputs.markdown_name }}" + path: ./benchmarks/accuracy/${{ steps.report.outputs.markdown_name }}.md + if-no-files-found: warn + retention-days: 90 + overwrite: true diff --git a/.github/workflows/accuracy_test.yaml b/.github/workflows/accuracy_test.yaml index 4fbeb91..6d0faf8 100644 --- a/.github/workflows/accuracy_test.yaml +++ b/.github/workflows/accuracy_test.yaml @@ -16,28 +16,15 @@ # # This test will be triggered: -# 1. PR labeled with: '*accuracy-test' (ONLY 1 label valid) & 'ready-for-test' -# 2. 
workflow_dispatch with models input -# See detail rule in strategy.matrix note +# - PR labeled with: 'accuracy-test' & 'ready-for-test' name: ascend test / accuracy on: - schedule: - # Runs every 6 hours - - cron: '0 */6 * * *' pull_request: - types: [ labeled ] - workflow_dispatch: - inputs: - vllm-ascend-version: - description: 'vllm-ascend:' - required: true - type: choice - # Current supported vLLM versions - options: - - latest - - main - default: main + branches: + - 'main' + - '*-dev' + types: [ labeled, synchronize ] # Bash shells do not use ~/.profile or ~/.bashrc so these shells need to be explicitly # declared as "shell: bash -el {0}" on steps that need to be properly activated. @@ -52,271 +39,34 @@ concurrency: cancel-in-progress: true jobs: - accuracy_tests: - # test will be triggered when tag '*-accuracy-test' & 'ready-for-test' or workflow_dispatch job + run: + name: "" + strategy: + matrix: + # Only top series models should be listed in here + include: + - runner: a2-1 + model_name: Qwen3-8B + - runner: a2-1 + model_name: Qwen2.5-VL-7B-Instruct + - runner: a2-1 + model_name: Qwen2-Audio-7B-Instruct + - runner: a2-2 + model_name: Qwen3-30B-A3B + - runner: a2-2 + model_name: Qwen3-VL-30B-A3B-Instruct + - runner: a2-2 + model_name: DeepSeek-V2-Lite + fail-fast: false + # test will be triggered when tag 'accuracy-test' & 'ready-for-test' if: >- ${{ contains(github.event.pull_request.labels.*.name, 'accuracy-test') && - contains(github.event.pull_request.labels.*.name, 'ready-for-test') || - github.event_name == 'workflow_dispatch' || github.event_name == 'schedule' + contains(github.event.pull_request.labels.*.name, 'ready-for-test') }} - runs-on: ${{ matrix.runner }} - strategy: - matrix: - include: - - model_name: Qwen3-8B-Base - runner: linux-aarch64-a2-1 - - model_name: Qwen2.5-VL-7B-Instruct - runner: linux-aarch64-a2-1 - - model_name: Qwen3-30B-A3B - runner: linux-aarch64-a2-2 - - model_name: DeepSeek-V2-Lite - runner: linux-aarch64-a2-2 - 
fail-fast: false - - name: ${{ matrix.model_name }} accuracy - container: + uses: ./.github/workflows/_accuracy_test.yaml + with: + vllm: v0.11.0 + runner: linux-aarch64-${{ matrix.runner }} image: swr.cn-southwest-2.myhuaweicloud.com/base_image/ascend-ci/cann:8.2.rc1-910b-ubuntu22.04-py3.11 - env: - VLLM_USE_MODELSCOPE: True - # 1. If version specified (work_dispatch), do specified branch accuracy test - # 2. If no version (labeled PR), do accuracy test by default ref: - # The branch, tag or SHA to checkout. When checking out the repository that - # triggered a workflow, this defaults to the reference or SHA for that event. - # Otherwise, uses the default branch. - GHA_VLLM_ASCEND_VERSION: ${{ github.event.inputs.vllm-ascend-version }} - - steps: - - name: Checkout repository - uses: actions/checkout@v4 - - - name: Set model name as output - id: set_output - run: | - echo "model_name=${{ matrix.model_name }}" >> $GITHUB_OUTPUT - - - name: Config mirrors - run: | - sed -Ei 's@(ports|archive).ubuntu.com@cache-service.nginx-pypi-cache.svc.cluster.local:8081@g' /etc/apt/sources.list - pip config set global.index-url http://cache-service.nginx-pypi-cache.svc.cluster.local/pypi/simple - pip config set global.trusted-host cache-service.nginx-pypi-cache.svc.cluster.local - apt-get update -y - apt install git -y - - - name: Install system dependencies - run: | - apt-get -y install `cat packages.txt` - apt-get -y install gcc g++ cmake libnuma-dev - - - name: Checkout vllm-project/vllm repo - uses: actions/checkout@v4 - with: - repository: vllm-project/vllm - ref: v0.11.0 - path: ./vllm-empty - - - name: Install vllm-project/vllm from source - working-directory: ./vllm-empty - run: | - VLLM_TARGET_DEVICE=empty pip install -e . 
- - - name: Resolve vllm-ascend version - run: | - VERSION_INPUT="${{ github.event.inputs.vllm-ascend-version }}" - - if [[ "$VERSION_INPUT" == "latest" ]]; then - TAGS=$(git ls-remote --tags --sort=-v:refname https://github.com/vllm-project/vllm-ascend "v*" | cut -f2 | sed 's|refs/tags/||') - LATEST_TAG=$(echo "$TAGS" | head -n1) - if [[ -z "$LATEST_TAG" ]]; then - RESOLVED_VERSION="main" - else - RESOLVED_VERSION="$LATEST_TAG" - fi - else - RESOLVED_VERSION="$VERSION_INPUT" - fi - echo "GHA_VLLM_ASCEND_VERSION=$RESOLVED_VERSION" >> $GITHUB_ENV - - - name: Checkout vllm-project/vllm-ascend repo - uses: actions/checkout@v4 - with: - repository: vllm-project/vllm-ascend - path: ./vllm-ascend - ref: ${{ env.GHA_VLLM_ASCEND_VERSION }} - - - name: Install vllm-project/vllm-ascend - working-directory: ./vllm-ascend - env: - PIP_EXTRA_INDEX_URL: https://mirrors.huaweicloud.com/ascend/repos/pypi - run: | - pip install -r requirements-dev.txt - pip install -v -e . - - - name: Get vLLM commit hash and URL - working-directory: ./vllm-empty - run: | - VLLM_COMMIT=$(git rev-parse --short=7 HEAD) - echo "VLLM_COMMIT=$VLLM_COMMIT" >> $GITHUB_ENV - - - name: Get vLLM-Ascend commit hash and URL - working-directory: ./vllm-ascend - run: | - VLLM_ASCEND_COMMIT=$(git rev-parse --short=7 HEAD) - echo "VLLM_ASCEND_COMMIT=$VLLM_ASCEND_COMMIT" >> $GITHUB_ENV - - - name: Collect version info - run: | - for dir in /usr/local/Ascend/ascend-toolkit/*; do - dname=$(basename "$dir") - if [ "$dname" != "latest" ]; then - TOOLKIT_DIR="$dname" - break - fi - done - INFO_FILE="/usr/local/Ascend/ascend-toolkit/${TOOLKIT_DIR}/$(uname -i)-linux/ascend_toolkit_install.info" - GHA_CANN_VERSION=$(grep "version=" "$INFO_FILE" \ - | head -n1 \ - | cut -d'=' -f2 \ - | tr -d '"') - { - echo "GHA_CANN_VERSION=$GHA_CANN_VERSION" - pip show torch | grep "Version:" | awk '{print "GHA_TORCH_VERSION="$2}' - pip show torch_npu | grep "Version:" | awk '{print "GHA_TORCH_NPU_VERSION="$2}' - pip show vllm | grep 
"Version:" | awk '{print "GHA_VLLM_VERSION="$2}' | sed 's/+.*//' - } >> "$GITHUB_ENV" - - - name: Run accuracy test - id: report - env: - VLLM_WORKER_MULTIPROC_METHOD: spawn - VLLM_USE_MODELSCOPE: True - VLLM_VERSION: ${{ env.GHA_VLLM_VERSION }} - VLLM_COMMIT: ${{ env.VLLM_COMMIT }} - VLLM_ASCEND_VERSION: ${{ env.GHA_VLLM_ASCEND_VERSION || github.ref }} - VLLM_ASCEND_COMMIT: ${{ env.VLLM_ASCEND_COMMIT }} - CANN_VERSION: ${{ env.GHA_CANN_VERSION }} - TORCH_VERSION: ${{ env.GHA_TORCH_VERSION }} - TORCH_NPU_VERSION: ${{ env.GHA_TORCH_NPU_VERSION }} - run: | - model_base_name=$(basename ${{ matrix.model_name }}) - markdown_name="${model_base_name}" - echo "markdown_name=$markdown_name" >> $GITHUB_OUTPUT - mkdir -p ./benchmarks/accuracy - pytest -sv ./tests/e2e/models/test_lm_eval_correctness.py \ - --config ./tests/e2e/models/configs/${{ matrix.model_name }}.yaml - - - name: Generate step summary - if: ${{ always() }} - run: | - cat ./benchmarks/accuracy/${{ steps.report.outputs.markdown_name }}.md >> $GITHUB_STEP_SUMMARY - - - name: Sanitize version string for artifact naming - run: | - SAFE_VLLM_ASCEND_VERSION="${GHA_VLLM_ASCEND_VERSION//\//-}" - echo "SAFE_VLLM_ASCEND_VERSION=$SAFE_VLLM_ASCEND_VERSION" >> "$GITHUB_ENV" - - - name: Upload Report - uses: actions/upload-artifact@v4 - with: - name: "report-${{ env.SAFE_VLLM_ASCEND_VERSION }}-${{ steps.report.outputs.markdown_name }}" - path: ./benchmarks/accuracy/${{ steps.report.outputs.markdown_name }}.md - if-no-files-found: warn - retention-days: 90 - overwrite: true - - outputs: - model_name: ${{ steps.set_output.outputs.model_name }} - vllm_ascend_version: ${{ env.GHA_VLLM_ASCEND_VERSION }} - - create_pr: - runs-on: ubuntu-latest - needs: accuracy_tests - if: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.vllm-ascend-version == 'latest' }} - env: - UPSTREAM_REPO: vllm-project/vllm-ascend - steps: - - name: Checkout repository - uses: actions/checkout@v4 - with: - repository: 
vllm-ascend-ci/vllm-ascend - token: ${{ secrets.PAT_TOKEN }} - ref: main - - - name: Add upstream remote - run: | - git remote add upstream https://github.com/${{ env.UPSTREAM_REPO }}.git - git fetch upstream - git remote -v - - - name: Set Git user info dynamically - run: | - git config user.name "${{ github.actor }}" - git config user.email "${{ github.actor }}@users.noreply.github.com" - - - name: Create or switch to branch - run: | - TIMESTAMP=$(date +%Y%m%d%H%M%S) - BRANCH_NAME="auto-pr/accuracy-report-${TIMESTAMP}" - echo "BRANCH_NAME=${BRANCH_NAME}" >> $GITHUB_ENV - git checkout -B "${BRANCH_NAME}" upstream/main - - - name: Download only current run reports - uses: actions/download-artifact@v5 - with: - path: ./docs/source/developer_guide/evaluation/accuracy_report - pattern: report-* - github-token: ${{ secrets.GITHUB_TOKEN }} - run-id: ${{ github.run_id }} - - - name: Delete old report - run: | - find ./docs/source/developer_guide/evaluation/accuracy_report -maxdepth 1 -type f -name '*.md' ! 
-name 'index.md' -delete - find ./docs/source/developer_guide/evaluation/accuracy_report -mindepth 2 -type f -name '*.md' -exec mv -f {} ./docs/source/developer_guide/evaluation/accuracy_report \; - find ./docs/source/developer_guide/evaluation/accuracy_report -mindepth 1 -type d -empty -delete - - - name: Update accuracy_report/index.md - run: | - REPORT_DIR="./docs/source/developer_guide/evaluation/accuracy_report" - INDEX_MD="$REPORT_DIR/index.md" - { - echo "# Accuracy Report" - echo "" - echo ":::{toctree}" - echo ":caption: Accuracy Report" - echo ":maxdepth: 1" - - for report in "$REPORT_DIR"/*.md; do - filename="$(basename "$report" .md)" - if [ "$filename" != "index" ]; then - echo "$filename" - fi - done - echo ":::" - } > "$INDEX_MD" - - - name: push accuracy report - env: - GITHUB_TOKEN: ${{ secrets.PAT_TOKEN }} - run: | - git add ./docs/source/developer_guide/evaluation/accuracy_report/*.md - git commit -s -m "[Doc] Update accuracy reports for ${{ needs.accuracy_tests.outputs.vllm_ascend_version }}" - git push -f origin "${{ env.BRANCH_NAME }}" - - - name: Create PR in upstream via API - uses: actions/github-script@v8 - with: - github-token: ${{ secrets.PAT_TOKEN }} - script: | - const pr = await github.rest.pulls.create({ - owner: 'vllm-project', - repo: 'vllm-ascend', - head: `vllm-ascend-ci:${{ env.BRANCH_NAME }}`, - base: 'main', - title: `[Doc] Update accuracy reports for ${{ needs.accuracy_tests.outputs.vllm_ascend_version }}`, - body: `The accuracy results running on NPU Altlas A2 have changed, updating reports for: All models (Qwen3-30B-A3B, Qwen2.5-VL-7B-Instruct, Qwen3-8B-Base, DeepSeek-V2-Lite) - - - [Workflow run][1] - - [1]: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}` - }); - core.info(`Created PR #${pr.data.number}`); + model_name: ${{ matrix.model_name }} diff --git a/.github/workflows/vllm_ascend_test_models.yaml b/.github/workflows/vllm_ascend_test_models.yaml new file mode 100644 index 
0000000..b026c04 --- /dev/null +++ b/.github/workflows/vllm_ascend_test_models.yaml @@ -0,0 +1,177 @@ +# +# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# This file is a part of the vllm-ascend project. +# + +# This test will be triggered: +# 1. schedule +# 2. pull_request change the related files +# 3. workflow_dispatch with models input + +name: ascend test / models + +on: + schedule: + # Runs every 6 hours + - cron: '0 */6 * * *' + pull_request: + branches: + - 'main' + - '*-dev' + paths: + - '.github/workflows/vllm_ascend_test_models.yaml' + - 'tests/e2e/models/test_lm_eval_correctness.py' + workflow_dispatch: + inputs: + vllm-ascend-version: + description: 'vllm-ascend:' + required: true + type: choice + # Current supported vLLM versions + options: + - latest + - main + default: main + +# Bash shells do not use ~/.profile or ~/.bashrc so these shells need to be explicitly +# declared as "shell: bash -el {0}" on steps that need to be properly activated. +# It's used to activate ascend-toolkit environment variables. 
+defaults: + run: + shell: bash -el {0} + +# only cancel in-progress runs of the same workflow +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + run: + strategy: + matrix: + include: + - model_name: Qwen3-8B + runner: a2-1 + - model_name: Qwen2.5-VL-7B-Instruct + runner: a2-1 + - model_name: Qwen2-Audio-7B-Instruct + runner: a2-1 + - model_name: Qwen3-30B-A3B + runner: a2-2 + - model_name: Qwen3-VL-30B-A3B-Instruct + runner: a2-2 + - model_name: DeepSeek-V2-Lite + runner: a2-2 + fail-fast: false + uses: ./.github/workflows/_accuracy_test.yaml + with: + vllm: v0.11.0 + runner: linux-aarch64-${{ matrix.runner }} + image: swr.cn-southwest-2.myhuaweicloud.com/base_image/ascend-ci/cann:8.2.rc1-910b-ubuntu22.04-py3.11 + model_name: ${{ matrix.model_name }} + upload: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.vllm-ascend-version == 'latest' }} + + create_pr: + runs-on: ubuntu-latest + needs: run + if: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.vllm-ascend-version == 'latest' }} + env: + UPSTREAM_REPO: vllm-project/vllm-ascend + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + repository: vllm-ascend-ci/vllm-ascend + token: ${{ secrets.PAT_TOKEN }} + ref: main + + - name: Add upstream remote + run: | + git remote add upstream https://github.com/${{ env.UPSTREAM_REPO }}.git + git fetch upstream + git remote -v + + - name: Set Git user info dynamically + run: | + git config user.name "${{ github.actor }}" + git config user.email "${{ github.actor }}@users.noreply.github.com" + + - name: Create or switch to branch + run: | + TIMESTAMP=$(date +%Y%m%d%H%M%S) + BRANCH_NAME="auto-pr/accuracy-report-${TIMESTAMP}" + echo "BRANCH_NAME=${BRANCH_NAME}" >> $GITHUB_ENV + git checkout -B "${BRANCH_NAME}" upstream/main + + - name: Download only current run reports + uses: actions/download-artifact@v5 + with: + path: 
./docs/source/developer_guide/evaluation/accuracy_report + pattern: report-* + github-token: ${{ secrets.GITHUB_TOKEN }} + run-id: ${{ github.run_id }} + + - name: Delete old report + run: | + find ./docs/source/developer_guide/evaluation/accuracy_report -maxdepth 1 -type f -name '*.md' ! -name 'index.md' -delete + find ./docs/source/developer_guide/evaluation/accuracy_report -mindepth 2 -type f -name '*.md' -exec mv -f {} ./docs/source/developer_guide/evaluation/accuracy_report \; + find ./docs/source/developer_guide/evaluation/accuracy_report -mindepth 1 -type d -empty -delete + + - name: Update accuracy_report/index.md + run: | + REPORT_DIR="./docs/source/developer_guide/evaluation/accuracy_report" + INDEX_MD="$REPORT_DIR/index.md" + { + echo "# Accuracy Report" + echo "" + echo ":::{toctree}" + echo ":caption: Accuracy Report" + echo ":maxdepth: 1" + + for report in "$REPORT_DIR"/*.md; do + filename="$(basename "$report" .md)" + if [ "$filename" != "index" ]; then + echo "$filename" + fi + done + echo ":::" + } > "$INDEX_MD" + + - name: push accuracy report + env: + GITHUB_TOKEN: ${{ secrets.PAT_TOKEN }} + run: | + git add ./docs/source/developer_guide/evaluation/accuracy_report/*.md + git commit -s -m "[Doc] Update accuracy reports for ${{ env.BRANCH_NAME }}" + git push -f origin "${{ env.BRANCH_NAME }}" + + - name: Create PR in upstream via API + uses: actions/github-script@v8 + with: + github-token: ${{ secrets.PAT_TOKEN }} + script: | + const pr = await github.rest.pulls.create({ + owner: 'vllm-project', + repo: 'vllm-ascend', + head: `vllm-ascend-ci:${{ env.BRANCH_NAME }}`, + base: 'main', + title: `[Doc] Update accuracy reports for ${{ env.BRANCH_NAME }}`, + body: `The accuracy results running on NPU Altlas A2 have changed, updating reports for: All models + + - [Workflow run][1] + + [1]: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}` + }); + core.info(`Created PR #${pr.data.number}`); diff --git 
a/tests/e2e/models/configs/Qwen2-Audio-7B-Instruct.yaml b/tests/e2e/models/configs/Qwen2-Audio-7B-Instruct.yaml new file mode 100644 index 0000000..5c810ef --- /dev/null +++ b/tests/e2e/models/configs/Qwen2-Audio-7B-Instruct.yaml @@ -0,0 +1,10 @@ +model_name: "Qwen/Qwen2-Audio-7B-Instruct" +hardware: "Atlas A2 Series" +tasks: +- name: "gsm8k" + metrics: + - name: "exact_match,strict-match" + value: 0.44 + - name: "exact_match,flexible-extract" + value: 0.45 +num_fewshot: 5 diff --git a/tests/e2e/models/configs/Qwen2-VL-7B-Instruct.yaml b/tests/e2e/models/configs/Qwen2-VL-7B-Instruct.yaml new file mode 100644 index 0000000..576b4de --- /dev/null +++ b/tests/e2e/models/configs/Qwen2-VL-7B-Instruct.yaml @@ -0,0 +1,10 @@ +model_name: "Qwen/Qwen2-VL-7B-Instruct" +hardware: "Atlas A2 Series" +model: "vllm-vlm" +tasks: +- name: "mmmu_val" + metrics: + - name: "acc,none" + value: 0.50 +max_model_len: 8192 +gpu_memory_utilization: 0.7 diff --git a/tests/e2e/models/configs/Qwen3-8B.yaml b/tests/e2e/models/configs/Qwen3-8B.yaml new file mode 100644 index 0000000..c3d75f0 --- /dev/null +++ b/tests/e2e/models/configs/Qwen3-8B.yaml @@ -0,0 +1,11 @@ +model_name: "Qwen/Qwen3-8B" +hardware: "Atlas A2 Series" +tasks: +- name: "gsm8k" + metrics: + - name: "exact_match,strict-match" + value: 0.765 + - name: "exact_match,flexible-extract" + value: 0.81 +num_fewshot: 5 +enable_thinking: False diff --git a/tests/e2e/models/configs/Qwen3-VL-30B-A3B-Instruct.yaml b/tests/e2e/models/configs/Qwen3-VL-30B-A3B-Instruct.yaml new file mode 100644 index 0000000..5b5dc05 --- /dev/null +++ b/tests/e2e/models/configs/Qwen3-VL-30B-A3B-Instruct.yaml @@ -0,0 +1,12 @@ +model_name: "Qwen/Qwen3-VL-30B-A3B-Instruct" +hardware: "Atlas A2 Series" +model: "vllm-vlm" +tasks: +- name: "mmmu_val" + metrics: + - name: "acc,none" + value: 0.58 +max_model_len: 8192 +tensor_parallel_size: 2 +gpu_memory_utilization: 0.7 +enable_expert_parallel: True diff --git a/tests/e2e/models/configs/accuracy.txt 
b/tests/e2e/models/configs/accuracy.txt index 2184a59..3bdcfd8 100644 --- a/tests/e2e/models/configs/accuracy.txt +++ b/tests/e2e/models/configs/accuracy.txt @@ -1,4 +1,8 @@ DeepSeek-V2-Lite.yaml -Qwen3-8B-Base.yaml Qwen2.5-VL-7B-Instruct.yaml -Qwen3-30B-A3B.yaml \ No newline at end of file +Qwen3-30B-A3B.yaml +Qwen3-8B.yaml +Qwen2-7B.yaml +Qwen2-VL-7B-Instruct.yaml +Qwen2-Audio-7B-Instruct.yaml +Qwen3-VL-30B-A3B-Instruct.yaml diff --git a/tests/e2e/models/test_lm_eval_correctness.py b/tests/e2e/models/test_lm_eval_correctness.py index eaef67d..b171e59 100644 --- a/tests/e2e/models/test_lm_eval_correctness.py +++ b/tests/e2e/models/test_lm_eval_correctness.py @@ -7,7 +7,7 @@ import pytest import yaml from jinja2 import Environment, FileSystemLoader -RTOL = 0.03 +RTOL = 0.05 TEST_DIR = os.path.dirname(__file__) @@ -48,7 +48,7 @@ def build_model_args(eval_config, tp_size): } for s in [ "max_images", "gpu_memory_utilization", "enable_expert_parallel", - "tensor_parallel_size", "enforce_eager" + "tensor_parallel_size", "enforce_eager", "enable_thinking" ]: val = eval_config.get(s, None) if val is not None: