diff --git a/.github/workflows/_e2e_test.yaml b/.github/workflows/_e2e_test.yaml index f7249184..ee2ddad6 100644 --- a/.github/workflows/_e2e_test.yaml +++ b/.github/workflows/_e2e_test.yaml @@ -110,7 +110,8 @@ jobs: pytest -sv --durations=0 tests/e2e/singlecard/test_completion_with_prompt_embeds.py pytest -sv --durations=0 tests/e2e/singlecard/test_aclgraph_accuracy.py pytest -sv --durations=0 tests/e2e/singlecard/test_async_scheduling.py - pytest -sv --durations=0 tests/e2e/singlecard/test_guided_decoding.py + # xgrammar has parameter mismatching bug, please follows: https://github.com/vllm-project/vllm-ascend/issues/5524 + # pytest -sv --durations=0 tests/e2e/singlecard/test_guided_decoding.py # torch 2.8 doesn't work with lora, fix me #pytest -sv --durations=0 tests/e2e/singlecard/test_ilama_lora.py pytest -sv --durations=0 tests/e2e/singlecard/test_profile_execute_duration.py diff --git a/.github/workflows/bot_pr_create.yaml b/.github/workflows/bot_pr_create.yaml index 3416973c..6ba035cf 100644 --- a/.github/workflows/bot_pr_create.yaml +++ b/.github/workflows/bot_pr_create.yaml @@ -34,7 +34,7 @@ jobs: steps: - name: Get vLLM version run: | - VLLM_COMMIT=45c1ca1ca1ee8fa06df263c8715e8a412ff408d4 + VLLM_COMMIT=7157596103666ee7ccb7008acee8bff8a8ff1731 echo "VLLM_COMMIT=https://github.com/vllm-project/vllm/commit/$VLLM_COMMIT" >> $GITHUB_ENV - name: Checkout repository diff --git a/.github/workflows/pr_test_full.yaml b/.github/workflows/pr_test_full.yaml index f81081b1..7a8d6b44 100644 --- a/.github/workflows/pr_test_full.yaml +++ b/.github/workflows/pr_test_full.yaml @@ -74,7 +74,7 @@ jobs: name: e2e-full strategy: matrix: - vllm_version: [45c1ca1ca1ee8fa06df263c8715e8a412ff408d4, v0.13.0] + vllm_version: [7157596103666ee7ccb7008acee8bff8a8ff1731, v0.13.0] needs: [changes] if: ${{ needs.changes.outputs.e2e_tracker == 'true' }} uses: ./.github/workflows/_e2e_test.yaml diff --git a/.github/workflows/pr_test_light.yaml b/.github/workflows/pr_test_light.yaml index 1de9ee36..0232f5c5 100644 --- a/.github/workflows/pr_test_light.yaml +++ b/.github/workflows/pr_test_light.yaml @@ -42,7 +42,7 @@ jobs: lint: uses: ./.github/workflows/_pre_commit.yml with: - vllm: 45c1ca1ca1ee8fa06df263c8715e8a412ff408d4 + vllm: 7157596103666ee7ccb7008acee8bff8a8ff1731 changes: runs-on: linux-aarch64-a2-0 outputs: @@ -90,7 +90,7 @@ jobs: SOC_VERSION: ascend910b1 strategy: matrix: - vllm_version: [45c1ca1ca1ee8fa06df263c8715e8a412ff408d4, v0.13.0] + vllm_version: [7157596103666ee7ccb7008acee8bff8a8ff1731, v0.13.0] steps: - name: Free up disk space @@ -163,7 +163,7 @@ jobs: name: e2e-light strategy: matrix: - vllm_version: [45c1ca1ca1ee8fa06df263c8715e8a412ff408d4, v0.13.0] + vllm_version: [7157596103666ee7ccb7008acee8bff8a8ff1731, v0.13.0] # Note (yikun): If CI resource are limited we can split job into two chain jobs needs: [lint, changes] # only trigger e2e test after lint passed and the change is e2e related with pull request. diff --git a/docs/source/community/versioning_policy.md b/docs/source/community/versioning_policy.md index 20ddab93..211867da 100644 --- a/docs/source/community/versioning_policy.md +++ b/docs/source/community/versioning_policy.md @@ -51,7 +51,7 @@ If you're using v0.7.3, don't forget to install [mindie-turbo](https://pypi.org/ For main branch of vLLM Ascend, we usually make it compatible with the latest vLLM release and a newer commit hash of vLLM. Please note that this table is usually updated. Please check it regularly. | vLLM Ascend | vLLM | Python | Stable CANN | PyTorch/torch_npu | |-------------|--------------|------------------|-------------|--------------------| -| main | 45c1ca1ca1ee8fa06df263c8715e8a412ff408d4, v0.13.0 tag | >= 3.10, < 3.12 | 8.3.RC2 | 2.8.0 / 2.8.0 | +| main | 7157596103666ee7ccb7008acee8bff8a8ff1731, v0.13.0 tag | >= 3.10, < 3.12 | 8.3.RC2 | 2.8.0 / 2.8.0 | ## Release cadence diff --git a/tests/e2e/multicard/test_aclgraph_capture_replay.py b/tests/e2e/multicard/test_aclgraph_capture_replay.py index 847d3aaa..38a931fb 100644 --- a/tests/e2e/multicard/test_aclgraph_capture_replay.py +++ b/tests/e2e/multicard/test_aclgraph_capture_replay.py @@ -107,6 +107,8 @@ def _run_worker_process( quantization="ascend" if "W8A8" in model_path else None, enable_expert_parallel=True if "DeepSeek" in model_path else False, trust_remote_code=True, + # vllm enables async scheduling by default, remove below when vllm >= 0.14.0 + async_scheduling=False, ) # Expose model config to the main test process