From 73a3f822c78c789befba8bb0c8d65f03c5094c0e Mon Sep 17 00:00:00 2001
From: wjunLu <135617475+wjunLu@users.noreply.github.com>
Date: Thu, 15 Jan 2026 23:22:43 +0800
Subject: [PATCH] [Main2Main] Upgrade vllm commit to releases/v0.14.0 (#5911)

### What this PR does / why we need it?
Upgrade the pinned vLLM commit to releases/v0.14.0.
- Re-open the cases in `tests/e2e/singlecard/pooling/test_scoring.py` that
  were previously skipped, since the earlier errors have been fixed by
  https://github.com/vllm-project/vllm/pull/32243

### Does this PR introduce _any_ user-facing change?

### How was this patch tested?
- vLLM version: v0.13.0
- vLLM main: https://github.com/vllm-project/vllm/commit/11b6af5280d6d6dfb8953af16e67b25f819b3be9

Signed-off-by: wjunLu
---
 .github/workflows/_e2e_test.yaml                |  2 +-
 .github/workflows/bot_pr_create.yaml            |  2 +-
 .github/workflows/pr_test_full.yaml             |  2 +-
 .github/workflows/pr_test_light.yaml            |  6 +++---
 .github/workflows/schedule_codecov_refresh.yaml |  2 +-
 docs/source/community/versioning_policy.md      |  2 +-
 tests/e2e/singlecard/pooling/test_scoring.py    | 14 --------------
 7 files changed, 8 insertions(+), 22 deletions(-)

diff --git a/.github/workflows/_e2e_test.yaml b/.github/workflows/_e2e_test.yaml
index ed744973..245fa85d 100644
--- a/.github/workflows/_e2e_test.yaml
+++ b/.github/workflows/_e2e_test.yaml
@@ -446,4 +446,4 @@ jobs:
           PYTORCH_NPU_ALLOC_CONF: max_split_size_mb:256
           VLLM_WORKER_MULTIPROC_METHOD: spawn
         run: |
-          pytest -sv --durations=0 tests/e2e/310p/test_offline_inference_parallel_310p.py
+          pytest -sv --durations=0 tests/e2e/310p/test_offline_inference_parallel_310p.py
\ No newline at end of file
diff --git a/.github/workflows/bot_pr_create.yaml b/.github/workflows/bot_pr_create.yaml
index cf17cd5b..88a6e7c7 100644
--- a/.github/workflows/bot_pr_create.yaml
+++ b/.github/workflows/bot_pr_create.yaml
@@ -37,7 +37,7 @@ jobs:
     steps:
       - name: Get vLLM version
         run: |
-          VLLM_COMMIT=11b6af5280d6d6dfb8953af16e67b25f819b3be9
+          VLLM_COMMIT=2c24bc6996cb165fce92f780b388a5e39b3f4060
           echo "VLLM_COMMIT=https://github.com/vllm-project/vllm/commit/$VLLM_COMMIT" >> $GITHUB_ENV

      - name: Checkout repository
diff --git a/.github/workflows/pr_test_full.yaml b/.github/workflows/pr_test_full.yaml
index 16461015..39c9f584 100644
--- a/.github/workflows/pr_test_full.yaml
+++ b/.github/workflows/pr_test_full.yaml
@@ -75,7 +75,7 @@ jobs:
     name: e2e-full
     strategy:
       matrix:
-        vllm_version: [11b6af5280d6d6dfb8953af16e67b25f819b3be9, v0.13.0]
+        vllm_version: [2c24bc6996cb165fce92f780b388a5e39b3f4060, v0.13.0]
     needs: [changes]
     if: ${{ needs.changes.outputs.e2e_tracker == 'true' }}
     uses: ./.github/workflows/_e2e_test.yaml
diff --git a/.github/workflows/pr_test_light.yaml b/.github/workflows/pr_test_light.yaml
index 3180f561..5469a897 100644
--- a/.github/workflows/pr_test_light.yaml
+++ b/.github/workflows/pr_test_light.yaml
@@ -41,7 +41,7 @@ jobs:
   lint:
     uses: ./.github/workflows/_pre_commit.yml
     with:
-      vllm: 11b6af5280d6d6dfb8953af16e67b25f819b3be9
+      vllm: 2c24bc6996cb165fce92f780b388a5e39b3f4060
   changes:
     runs-on: linux-aarch64-a2-0
     outputs:
@@ -84,7 +84,7 @@ jobs:
     if: ${{ needs.lint.result == 'success' && (needs.changes.outputs.e2e_tracker == 'true' || needs.changes.outputs.ut_tracker == 'true') }}
     strategy:
       matrix:
-        vllm_version: [11b6af5280d6d6dfb8953af16e67b25f819b3be9, v0.13.0]
+        vllm_version: [2c24bc6996cb165fce92f780b388a5e39b3f4060, v0.13.0]
     uses: ./.github/workflows/_unit_test.yaml
     with:
       vllm: ${{ matrix.vllm_version }}
@@ -96,7 +96,7 @@ jobs:
     name: e2e-light
     strategy:
       matrix:
-        vllm_version: [11b6af5280d6d6dfb8953af16e67b25f819b3be9, v0.13.0]
+        vllm_version: [2c24bc6996cb165fce92f780b388a5e39b3f4060, v0.13.0]
     # Note (yikun): If CI resource are limited we can split job into two chain jobs
     needs: [lint, changes]
     # only trigger e2e test after lint passed and the change is e2e related with pull request.
diff --git a/.github/workflows/schedule_codecov_refresh.yaml b/.github/workflows/schedule_codecov_refresh.yaml
index 0093679f..c0c9b284 100644
--- a/.github/workflows/schedule_codecov_refresh.yaml
+++ b/.github/workflows/schedule_codecov_refresh.yaml
@@ -33,7 +33,7 @@ jobs:
     name: refresh codecov
     strategy:
      matrix:
-        vllm_version: [11b6af5280d6d6dfb8953af16e67b25f819b3be9]
+        vllm_version: [2c24bc6996cb165fce92f780b388a5e39b3f4060]
     uses: ./.github/workflows/_unit_test.yaml
     with:
       vllm: ${{ matrix.vllm_version }}
diff --git a/docs/source/community/versioning_policy.md b/docs/source/community/versioning_policy.md
index b2033e32..a6d79523 100644
--- a/docs/source/community/versioning_policy.md
+++ b/docs/source/community/versioning_policy.md
@@ -53,7 +53,7 @@ For main branch of vLLM Ascend, we usually make it compatible with the latest vL

 | vLLM Ascend | vLLM | Python | Stable CANN | PyTorch/torch_npu |
 |-------------|--------------|------------------|-------------|--------------------|
-| main | 11b6af5280d6d6dfb8953af16e67b25f819b3be9, v0.13.0 tag | >= 3.10, < 3.12 | 8.3.RC2 | 2.8.0 / 2.8.0 |
+| main | 2c24bc6996cb165fce92f780b388a5e39b3f4060, v0.13.0 tag | >= 3.10, < 3.12 | 8.3.RC2 | 2.8.0 / 2.8.0 |

 ## Release cadence
diff --git a/tests/e2e/singlecard/pooling/test_scoring.py b/tests/e2e/singlecard/pooling/test_scoring.py
index d8105ce0..fb81b6f2 100644
--- a/tests/e2e/singlecard/pooling/test_scoring.py
+++ b/tests/e2e/singlecard/pooling/test_scoring.py
@@ -5,8 +5,6 @@ import torch
 import torch.nn.functional as F
 from modelscope import snapshot_download  # type: ignore[import-untyped]

-from vllm_ascend.utils import vllm_version_is
-
 from tests.e2e.conftest import HfRunner, VllmRunner

 CROSS_ENCODER_MODELS = [
@@ -35,10 +33,6 @@ DTYPE = "half"
 def model_name(request):
     yield snapshot_download(request.param)


-@pytest.mark.skipif(
-    not vllm_version_is('0.13.0'),
-    reason="vLLM PR-32148 changed the behavior of cross scoring",
-)
 def test_cross_encoder_score_1_to_1(model_name):
     text_pair = [TEXTS_1[0], TEXTS_2[0]]
@@ -58,10 +52,6 @@
     assert hf_outputs[0] == pytest.approx(vllm_outputs[0], rel=0.01)


-@pytest.mark.skipif(
-    not vllm_version_is('0.13.0'),
-    reason="vLLM PR-32148 changed the behavior of cross scoring",
-)
 def test_cross_encoder_score_1_to_N(model_name):
     text_pairs = [
         [TEXTS_1[0], TEXTS_2[0]],
@@ -85,10 +75,6 @@
     assert hf_outputs[1] == pytest.approx(vllm_outputs[1], rel=0.01)


-@pytest.mark.skipif(
-    not vllm_version_is('0.13.0'),
-    reason="vLLM PR-32148 changed the behavior of cross scoring",
-)
 def test_cross_encoder_score_N_to_N(model_name):
     text_pairs = [
         [TEXTS_1[0], TEXTS_2[0]],
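For context on what the re-opened tests assert: each one scores query/document pairs with a Hugging Face cross-encoder and checks that vLLM's score output agrees within `rel=0.01`. Below is a minimal, self-contained sketch of the Hugging Face reference side of that comparison; the model name is an assumed stand-in, not one of the pinned `CROSS_ENCODER_MODELS` (the real tests download models via modelscope and drive both stacks through `HfRunner`/`VllmRunner` from `tests/e2e/conftest`).

```python
# Hedged sketch of the HF reference scoring that the re-opened e2e tests
# compare against vLLM's score output. The model below is an assumed
# stand-in for illustration only.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

MODEL = "cross-encoder/ms-marco-MiniLM-L-6-v2"  # assumption, not from the patch

tokenizer = AutoTokenizer.from_pretrained(MODEL)
model = AutoModelForSequenceClassification.from_pretrained(MODEL).eval()

query = "What is the capital of France?"
docs = ["The capital of France is Paris.", "Horses eat grass."]

# 1-to-N scoring, mirroring test_cross_encoder_score_1_to_N: pair the single
# query with every candidate document in one batch.
inputs = tokenizer([query] * len(docs), docs, padding=True, truncation=True,
                   return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits.squeeze(-1)

# Single-logit cross encoders map each logit to a relevance score via sigmoid;
# the tests then assert hf_score == pytest.approx(vllm_score, rel=0.01).
scores = torch.sigmoid(logits).tolist()
print(scores)
```

The relative tolerance, rather than exact equality, is what lets the half-precision runs (`DTYPE = "half"`) on the two stacks line up despite minor numerics differences.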