Update vllm pin to 12.25 (#5342)

### What this PR does / why we need it?
- Fixes a vLLM breakage introduced by the PR:
1. [Drop v0.14 deprecations](https://github.com/vllm-project/vllm/pull/31285)
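
Based on the diffs in this commit, the concrete breakage is that `get_open_port` can no longer be imported from `vllm.utils` and now lives in `vllm.utils.network_utils`. A minimal sketch of the updated import, assuming a vLLM build that includes vllm-project/vllm#31285:

```python
# get_open_port moved out of vllm.utils in vllm-project/vllm#31285;
# import it from the new module path instead.
from vllm.utils.network_utils import get_open_port

port = get_open_port()  # a free local port, e.g. for a test server
print(f"serving on port {port}")
```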
### Does this PR introduce _any_ user-facing change?
No
### How was this patch tested?
- vLLM version: release/v0.13.0
- vLLM main: bc0a5a0c08

---------

Signed-off-by: ZT-AIA <1028681969@qq.com>
ZT-AIA committed on 2025-12-26 14:05:40 +08:00 (committed by GitHub)
parent c2f776b846
commit adaa89a7a5
20 changed files with 22 additions and 22 deletions

View File

@@ -34,7 +34,7 @@ jobs:
steps:
- name: Get vLLM version
run: |
-VLLM_COMMIT=bc0a5a0c089844b17cb93f3294348f411e523586
+VLLM_COMMIT=254f6b986720c92ddf97fbb1a6a6465da8e87e29
echo "VLLM_COMMIT=https://github.com/vllm-project/vllm/commit/$VLLM_COMMIT" >> $GITHUB_ENV
- name: Checkout repository

View File

@@ -74,7 +74,7 @@ jobs:
name: e2e-full
strategy:
matrix:
-vllm_version: [bc0a5a0c089844b17cb93f3294348f411e523586, v0.13.0]
+vllm_version: [254f6b986720c92ddf97fbb1a6a6465da8e87e29, v0.13.0]
needs: [changes]
if: ${{ needs.changes.outputs.e2e_tracker == 'true' }}
uses: ./.github/workflows/_e2e_test.yaml

View File

@@ -42,7 +42,7 @@ jobs:
lint:
uses: ./.github/workflows/_pre_commit.yml
with:
-vllm: bc0a5a0c089844b17cb93f3294348f411e523586
+vllm: 254f6b986720c92ddf97fbb1a6a6465da8e87e29
changes:
runs-on: linux-aarch64-a2-0
outputs:
@@ -90,7 +90,7 @@ jobs:
SOC_VERSION: ascend910b1
strategy:
matrix:
-vllm_version: [bc0a5a0c089844b17cb93f3294348f411e523586, v0.13.0]
+vllm_version: [254f6b986720c92ddf97fbb1a6a6465da8e87e29, v0.13.0]
steps:
- name: Free up disk space
@@ -160,7 +160,7 @@ jobs:
name: e2e-light
strategy:
matrix:
-vllm_version: [bc0a5a0c089844b17cb93f3294348f411e523586, v0.13.0]
+vllm_version: [254f6b986720c92ddf97fbb1a6a6465da8e87e29, v0.13.0]
# Note (yikun): If CI resources are limited, we can split the job into two chained jobs
needs: [lint, changes]
# only trigger the e2e test after lint has passed and the pull request touches e2e-related files.

View File

@@ -50,7 +50,7 @@ If you're using v0.7.3, don't forget to install [mindie-turbo](https://pypi.org/
For the main branch of vLLM Ascend, we usually keep it compatible with the latest vLLM release and a newer vLLM commit hash. Note that this table is updated regularly, so please check it from time to time.
| vLLM Ascend | vLLM | Python | Stable CANN | PyTorch/torch_npu |
|-------------|--------------|------------------|-------------|--------------------|
-| main | bc0a5a0c089844b17cb93f3294348f411e523586, v0.13.0 tag | >= 3.10, < 3.12 | 8.3.RC2 | 2.8.0 / 2.8.0 |
+| main | 254f6b986720c92ddf97fbb1a6a6465da8e87e29, v0.13.0 tag | >= 3.10, < 3.12 | 8.3.RC2 | 2.8.0 / 2.8.0 |
## Release cadence
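
As a quick sanity check against the table above, one can print the installed versions; this is a hedged sketch, not part of this PR, with the expected values taken from the compatibility row:

```python
# Compare the installed stack against the documented compatibility row.
import torch
import vllm

print(f"vLLM:    {vllm.__version__}")   # v0.13.0 tag or a pinned main commit
print(f"PyTorch: {torch.__version__}")  # the table lists 2.8.0
```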

View File

@@ -28,7 +28,7 @@ from unittest.mock import patch
import openai
import pytest
from modelscope import snapshot_download # type: ignore
-from vllm.utils import get_open_port
+from vllm.utils.network_utils import get_open_port
from tests.e2e.conftest import RemoteOpenAIServer, VllmRunner
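
Since CI also runs against the v0.13.0 tag, a version-agnostic import shim is another option; this is an illustration only, not what this PR does (the PR switches to the new path outright):

```python
# Hedged sketch: tolerate both import locations across vLLM versions.
try:
    # vLLM main after vllm-project/vllm#31285
    from vllm.utils.network_utils import get_open_port
except ImportError:
    # older vLLM releases that still export it from vllm.utils
    from vllm.utils import get_open_port
```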

View File

@@ -19,7 +19,7 @@ from typing import Any
import openai
import pytest
-from vllm.utils import get_open_port
+from vllm.utils.network_utils import get_open_port
from tests.e2e.conftest import RemoteOpenAIServer
from tools.aisbench import run_aisbench_cases

View File

@@ -17,7 +17,7 @@
import json
import pytest
-from vllm.utils import get_open_port
+from vllm.utils.network_utils import get_open_port
from tests.e2e.conftest import RemoteOpenAIServer
from tools.aisbench import get_TTFT, run_aisbench_cases

View File

@@ -17,7 +17,7 @@
import json
import pytest
-from vllm.utils import get_open_port
+from vllm.utils.network_utils import get_open_port
from tests.e2e.conftest import RemoteOpenAIServer
from tools.aisbench import get_TTFT, run_aisbench_cases

View File

@@ -16,7 +16,7 @@
#
import pytest
-from vllm.utils import get_open_port
+from vllm.utils.network_utils import get_open_port
from tests.e2e.conftest import RemoteOpenAIServer
from tools.aisbench import run_aisbench_cases

View File

@@ -19,7 +19,7 @@ from typing import Any
import openai
import pytest
-from vllm.utils import get_open_port
+from vllm.utils.network_utils import get_open_port
from tests.e2e.conftest import RemoteOpenAIServer
from tools.aisbench import run_aisbench_cases

View File

@@ -19,7 +19,7 @@ from typing import Any
import openai
import pytest
-from vllm.utils import get_open_port
+from vllm.utils.network_utils import get_open_port
from tests.e2e.conftest import RemoteOpenAIServer
from tools.aisbench import run_aisbench_cases

View File

@@ -18,7 +18,7 @@ from typing import Any
import openai
import pytest
-from vllm.utils import get_open_port
+from vllm.utils.network_utils import get_open_port
from tests.e2e.conftest import RemoteOpenAIServer
from tools.aisbench import run_aisbench_cases

View File

@@ -18,7 +18,7 @@ from typing import Any
import openai
import pytest
-from vllm.utils import get_open_port
+from vllm.utils.network_utils import get_open_port
from tests.e2e.conftest import RemoteOpenAIServer
from tools.aisbench import run_aisbench_cases

View File

@@ -18,7 +18,7 @@ from typing import Any
import openai
import pytest
-from vllm.utils import get_open_port
+from vllm.utils.network_utils import get_open_port
from tests.e2e.conftest import RemoteOpenAIServer
from tools.aisbench import run_aisbench_cases

View File

@@ -18,7 +18,7 @@ from typing import Any
import openai
import pytest
-from vllm.utils import get_open_port
+from vllm.utils.network_utils import get_open_port
from tests.e2e.conftest import RemoteOpenAIServer
from tools.aisbench import run_aisbench_cases

View File

@@ -19,7 +19,7 @@ from typing import Any
import openai
import pytest
-from vllm.utils import get_open_port
+from vllm.utils.network_utils import get_open_port
from tests.e2e.conftest import RemoteOpenAIServer
from tools.aisbench import run_aisbench_cases

View File

@@ -19,7 +19,7 @@ from typing import Any
import openai
import pytest
-from vllm.utils import get_open_port
+from vllm.utils.network_utils import get_open_port
from tests.e2e.conftest import RemoteOpenAIServer
from tools.aisbench import run_aisbench_cases

View File

@@ -18,7 +18,7 @@ from typing import Any
import openai
import pytest
-from vllm.utils import get_open_port
+from vllm.utils.network_utils import get_open_port
from tests.e2e.conftest import RemoteOpenAIServer
from tools.aisbench import run_aisbench_cases

View File

@@ -20,7 +20,7 @@ from typing import Any
import openai
import pytest
-from vllm.utils import get_open_port
+from vllm.utils.network_utils import get_open_port
from tests.e2e.conftest import RemoteOpenAIServer
from tools.aisbench import run_aisbench_cases

View File

@@ -18,7 +18,7 @@ from typing import Any
import openai
import pytest
-from vllm.utils import get_open_port
+from vllm.utils.network_utils import get_open_port
from tests.e2e.conftest import RemoteOpenAIServer
from tools.aisbench import run_aisbench_cases