diff --git a/.github/actionlint.yaml b/.github/actionlint.yaml index 20dbc83a..4e921a24 100644 --- a/.github/actionlint.yaml +++ b/.github/actionlint.yaml @@ -19,3 +19,5 @@ self-hosted-runner: - linux-amd64-cpu-8 - linux-amd64-cpu-16 - linux-aarch64-a3-0 + - linux-amd64-cpu-8-hk + - linux-amd64-cpu-16-hk diff --git a/.github/workflows/_pre_commit.yml b/.github/workflows/_pre_commit.yml index 7bad3c2f..bc13d86d 100644 --- a/.github/workflows/_pre_commit.yml +++ b/.github/workflows/_pre_commit.yml @@ -12,15 +12,27 @@ permissions: jobs: pre-commit: - runs-on: ubuntu-latest + runs-on: linux-amd64-cpu-16-hk + container: + # Build it from https://github.com/nv-action/vllm-benchmarks/blob/main/Dockerfile + image: swr.cn-southwest-2.myhuaweicloud.com/base_image/ascend-ci/vllm-ascend:nightly-cpu + env: + GOPROXY: https://goproxy.io,direct + GITHUB_WORKSPACE: /__w/vllm-ascend/vllm-ascend steps: - name: Checkout vllm-project/vllm-ascend repo uses: actions/checkout@v6 - - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 - with: - python-version: "3.11" - - run: echo "::add-matcher::.github/workflows/matchers/actionlint.json" - - run: echo "::add-matcher::.github/workflows/matchers/mypy.json" + + # With problem matchers in a container, the output of $GITHUB_WORKSPACE and ${{ github.workspace }} are different. + # So we will just copy it into the path ${{ github.workspace }}. 
see https://github.com/actions/runner/issues/2058 + - name: cp problem matchers + run: | + cp .github/workflows/matchers/actionlint.json "$RUNNER_TEMP/actionlint.json" + cp .github/workflows/matchers/mypy.json "$RUNNER_TEMP/mypy.json" + + - run: echo "::add-matcher::$RUNNER_TEMP/actionlint.json" + - run: echo "::add-matcher::$RUNNER_TEMP/mypy.json" + - name: Checkout vllm-project/vllm repo uses: actions/checkout@v6 with: @@ -30,13 +42,16 @@ jobs: - name: Install vllm working-directory: vllm-empty run: | - pip install -r requirements/build.txt --extra-index-url https://download.pytorch.org/whl/cpu - VLLM_TARGET_DEVICE=empty pip install . + VLLM_TARGET_DEVICE=empty python3 -m pip install . --extra-index-url https://download.pytorch.org/whl/cpu/ - name: Install vllm-ascend dev run: | + git config --global --add safe.directory /__w/vllm-ascend/vllm-ascend pip install -r requirements-dev.txt --extra-index-url https://download.pytorch.org/whl/cpu - - uses: pre-commit/action@2c7b3805fd2a0fd8c1884dcaebf91fc102a13ecd # v3.0.1 + - name: Run pre-commit env: + PRE_COMMIT_COLOR: always + FORCE_COLOR: "1" + TERM: xterm-256color SHELLCHECK_OPTS: "--exclude=SC2046,SC2006,SC2086" # Exclude SC2046, SC2006, SC2086 for actionlint - with: - extra_args: --all-files --hook-stage manual + run: | + pre-commit run --all-files --hook-stage manual diff --git a/.github/workflows/pr_test_light.yaml b/.github/workflows/pr_test_light.yaml index 1b03067e..d0b15805 100644 --- a/.github/workflows/pr_test_light.yaml +++ b/.github/workflows/pr_test_light.yaml @@ -80,27 +80,25 @@ jobs: name: unit test # only trigger unit test after lint passed and the change is e2e and ut related. 
if: ${{ needs.lint.result == 'success' && (needs.changes.outputs.e2e_tracker == 'true' || needs.changes.outputs.ut_tracker == 'true') }} - runs-on: ubuntu-22.04-arm + runs-on: linux-amd64-cpu-16-hk container: # fixme: vllm-ascend install failed with 8.3.rc2 on github action - image: quay.io/ascend/cann:8.2.rc2-910b-ubuntu22.04-py3.11 + image: quay.nju.edu.cn/ascend/cann:8.2.rc2-910b-ubuntu22.04-py3.11 env: VLLM_LOGGING_LEVEL: ERROR VLLM_USE_MODELSCOPE: True SOC_VERSION: ascend910b1 + MAX_JOBS: 4 strategy: matrix: vllm_version: [2f4e6548efec402b913ffddc8726230d9311948d, v0.13.0] steps: - - name: Free up disk space - uses: jlumbroso/free-disk-space@54081f138730dfa15788a46383842cd2f914a1be # v1.3.1 - with: - tool-cache: true - docker-images: false - - name: Install packages run: | + sed -Ei 's@(ports|archive).ubuntu.com@cache-service.nginx-pypi-cache.svc.cluster.local:8081@g' /etc/apt/sources.list + pip config set global.index-url http://cache-service.nginx-pypi-cache.svc.cluster.local/pypi/simple + pip config set global.trusted-host cache-service.nginx-pypi-cache.svc.cluster.local apt-get update -y apt-get install -y python3-pip git vim wget net-tools gcc g++ cmake libnuma-dev curl gnupg2 @@ -114,7 +112,7 @@ jobs: - name: Install vllm-project/vllm from source working-directory: ./vllm-empty run: | - VLLM_TARGET_DEVICE=empty python3 -m pip install . + VLLM_TARGET_DEVICE=empty python3 -m pip install . --extra-index-url https://download.pytorch.org/whl/cpu/ python3 -m pip uninstall -y triton - name: Checkout vllm-project/vllm-ascend repo @@ -123,9 +121,9 @@ - name: Install vllm-project/vllm-ascend run: | export PIP_EXTRA_INDEX_URL=https://mirrors.huaweicloud.com/ascend/repos/pypi - export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/arm64-linux/devlib - python3 -m pip install -r requirements-dev.txt - python3 -m pip install -v . 
+ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/x86_64-linux/devlib + python3 -m pip install -v . --extra-index-url https://download.pytorch.org/whl/cpu/ + python3 -m pip install -r requirements-dev.txt --extra-index-url https://download.pytorch.org/whl/cpu/ - name: Install Ascend toolkit & triton_ascend shell: bash -l {0} @@ -141,7 +139,7 @@ VLLM_WORKER_MULTIPROC_METHOD: spawn TORCH_DEVICE_BACKEND_AUTOLOAD: 0 run: | - export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/arm64-linux/devlib + export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/x86_64-linux/devlib pytest -sv --cov --cov-report=xml:unittests-coverage.xml tests/ut \ --ignore tests/ut/model_loader/netloader/test_netloader_elastic.py \ --ignore tests/ut/kv_connector/test_remote_prefill_lifecycle.py \ diff --git a/tools/mypy.sh b/tools/mypy.sh index bf9bc774..caac0a1d 100755 --- a/tools/mypy.sh +++ b/tools/mypy.sh @@ -32,7 +32,7 @@ fi run_mypy() { echo "Running mypy on $1" - mypy --check-untyped-defs --follow-imports skip --python-version "${PYTHON_VERSION}" "$@" + mypy --follow-imports skip --python-version "${PYTHON_VERSION}" "$@" } run_mypy vllm_ascend