[Main2Main] Upgrade vllm commit to releases/v0.14.0 (#5911)
### What this PR does / why we need it?
Upgrade the pinned vLLM commit to releases/v0.14.0.
- Re-enable the cases in `tests/e2e/singlecard/pooling/test_scoring.py`: the earlier failures were fixed by
https://github.com/vllm-project/vllm/pull/32243, so their version-gated skip markers can be removed (see the sketch below).
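For context, the removed `@pytest.mark.skipif` guards call `vllm_version_is` from `vllm_ascend.utils`. A minimal sketch of that helper, assuming it simply compares the installed vLLM version against a target string (the real implementation may differ in detail, e.g. honoring an environment override):

```python
# Minimal sketch of the version gate the removed skip markers relied on.
# Assumption: the actual vllm_ascend.utils helper may differ in detail.
from packaging.version import Version

import vllm


def vllm_version_is(target: str) -> bool:
    """Return True when the installed vLLM matches the target version."""
    return Version(vllm.__version__) == Version(target)
```

On a v0.14.0 tree, `vllm_version_is('0.13.0')` is False, so `skipif(not vllm_version_is('0.13.0'))` skipped these tests; dropping the marker re-enables them.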
### Does this PR introduce _any_ user-facing change?
### How was this patch tested?
- vLLM version: v0.13.0
- vLLM main: 11b6af5280
Signed-off-by: wjunLu <wjunlu217@gmail.com>
```diff
--- a/.github/workflows/_e2e_test.yaml
+++ b/.github/workflows/_e2e_test.yaml
@@ -446,4 +446,4 @@ jobs:
           PYTORCH_NPU_ALLOC_CONF: max_split_size_mb:256
           VLLM_WORKER_MULTIPROC_METHOD: spawn
         run: |
-          pytest -sv --durations=0 tests/e2e/310p/test_offline_inference_parallel_310p.py
+          pytest -sv --durations=0 tests/e2e/310p/test_offline_inference_parallel_310p.py
```
```diff
--- a/.github/workflows/bot_pr_create.yaml
+++ b/.github/workflows/bot_pr_create.yaml
@@ -37,7 +37,7 @@ jobs:
     steps:
       - name: Get vLLM version
         run: |
-          VLLM_COMMIT=11b6af5280d6d6dfb8953af16e67b25f819b3be9
+          VLLM_COMMIT=2c24bc6996cb165fce92f780b388a5e39b3f4060
           echo "VLLM_COMMIT=https://github.com/vllm-project/vllm/commit/$VLLM_COMMIT" >> $GITHUB_ENV
 
       - name: Checkout repository
```
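The `echo ... >> $GITHUB_ENV` line is GitHub Actions' standard way of passing a value to later steps: the runner points `$GITHUB_ENV` at a file, and every `NAME=value` line appended there becomes an environment variable for subsequent steps of the job. A small Python equivalent of that shell step (the function name is illustrative):

```python
# Python equivalent of the workflow's shell step: append NAME=value to the
# file behind $GITHUB_ENV so later steps in the job see it as an env var.
import os


def export_to_github_env(name: str, value: str) -> None:
    with open(os.environ["GITHUB_ENV"], "a", encoding="utf-8") as env_file:
        env_file.write(f"{name}={value}\n")


export_to_github_env(
    "VLLM_COMMIT",
    "https://github.com/vllm-project/vllm/commit/"
    "2c24bc6996cb165fce92f780b388a5e39b3f4060",
)
```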
```diff
--- a/.github/workflows/pr_test_full.yaml
+++ b/.github/workflows/pr_test_full.yaml
@@ -75,7 +75,7 @@ jobs:
     name: e2e-full
     strategy:
       matrix:
-        vllm_version: [11b6af5280d6d6dfb8953af16e67b25f819b3be9, v0.13.0]
+        vllm_version: [2c24bc6996cb165fce92f780b388a5e39b3f4060, v0.13.0]
     needs: [changes]
     if: ${{ needs.changes.outputs.e2e_tracker == 'true' }}
     uses: ./.github/workflows/_e2e_test.yaml
```
```diff
--- a/.github/workflows/pr_test_light.yaml
+++ b/.github/workflows/pr_test_light.yaml
@@ -41,7 +41,7 @@ jobs:
   lint:
     uses: ./.github/workflows/_pre_commit.yml
     with:
-      vllm: 11b6af5280d6d6dfb8953af16e67b25f819b3be9
+      vllm: 2c24bc6996cb165fce92f780b388a5e39b3f4060
   changes:
     runs-on: linux-aarch64-a2-0
     outputs:
@@ -84,7 +84,7 @@ jobs:
     if: ${{ needs.lint.result == 'success' && (needs.changes.outputs.e2e_tracker == 'true' || needs.changes.outputs.ut_tracker == 'true') }}
     strategy:
       matrix:
-        vllm_version: [11b6af5280d6d6dfb8953af16e67b25f819b3be9, v0.13.0]
+        vllm_version: [2c24bc6996cb165fce92f780b388a5e39b3f4060, v0.13.0]
     uses: ./.github/workflows/_unit_test.yaml
     with:
       vllm: ${{ matrix.vllm_version }}
@@ -96,7 +96,7 @@ jobs:
     name: e2e-light
     strategy:
       matrix:
-        vllm_version: [11b6af5280d6d6dfb8953af16e67b25f819b3be9, v0.13.0]
+        vllm_version: [2c24bc6996cb165fce92f780b388a5e39b3f4060, v0.13.0]
     # Note (yikun): If CI resource are limited we can split job into two chain jobs
     needs: [lint, changes]
     # only trigger e2e test after lint passed and the change is e2e related with pull request.
```
```diff
@@ -33,7 +33,7 @@ jobs:
     name: refresh codecov
     strategy:
       matrix:
-        vllm_version: [11b6af5280d6d6dfb8953af16e67b25f819b3be9]
+        vllm_version: [2c24bc6996cb165fce92f780b388a5e39b3f4060]
     uses: ./.github/workflows/_unit_test.yaml
     with:
       vllm: ${{ matrix.vllm_version }}
```
```diff
@@ -53,7 +53,7 @@ For main branch of vLLM Ascend, we usually make it compatible with the latest vL
 
 | vLLM Ascend | vLLM | Python | Stable CANN | PyTorch/torch_npu |
 |-------------|--------------|------------------|-------------|--------------------|
-| main | 11b6af5280d6d6dfb8953af16e67b25f819b3be9, v0.13.0 tag | >= 3.10, < 3.12 | 8.3.RC2 | 2.8.0 / 2.8.0 |
+| main | 2c24bc6996cb165fce92f780b388a5e39b3f4060, v0.13.0 tag | >= 3.10, < 3.12 | 8.3.RC2 | 2.8.0 / 2.8.0 |
 
 ## Release cadence
```
||||
@@ -5,8 +5,6 @@ import torch
|
||||
import torch.nn.functional as F
|
||||
from modelscope import snapshot_download # type: ignore[import-untyped]
|
||||
|
||||
from vllm_ascend.utils import vllm_version_is
|
||||
|
||||
from tests.e2e.conftest import HfRunner, VllmRunner
|
||||
|
||||
CROSS_ENCODER_MODELS = [
|
||||
@@ -35,10 +33,6 @@ DTYPE = "half"
|
||||
def model_name(request):
|
||||
yield snapshot_download(request.param)
|
||||
|
||||
@pytest.mark.skipif(
|
||||
not vllm_version_is('0.13.0'),
|
||||
reason="vLLM PR-32148 changed the behavior of cross scoring",
|
||||
)
|
||||
def test_cross_encoder_score_1_to_1(model_name):
|
||||
text_pair = [TEXTS_1[0], TEXTS_2[0]]
|
||||
|
||||
@@ -58,10 +52,6 @@ def test_cross_encoder_score_1_to_1(model_name):
|
||||
assert hf_outputs[0] == pytest.approx(vllm_outputs[0], rel=0.01)
|
||||
|
||||
|
||||
@pytest.mark.skipif(
|
||||
not vllm_version_is('0.13.0'),
|
||||
reason="vLLM PR-32148 changed the behavior of cross scoring",
|
||||
)
|
||||
def test_cross_encoder_score_1_to_N(model_name):
|
||||
text_pairs = [
|
||||
[TEXTS_1[0], TEXTS_2[0]],
|
||||
@@ -85,10 +75,6 @@ def test_cross_encoder_score_1_to_N(model_name):
|
||||
assert hf_outputs[1] == pytest.approx(vllm_outputs[1], rel=0.01)
|
||||
|
||||
|
||||
@pytest.mark.skipif(
|
||||
not vllm_version_is('0.13.0'),
|
||||
reason="vLLM PR-32148 changed the behavior of cross scoring",
|
||||
)
|
||||
def test_cross_encoder_score_N_to_N(model_name):
|
||||
text_pairs = [
|
||||
[TEXTS_1[0], TEXTS_2[0]],
|
||||
|
||||
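With the skip markers gone, these tests run again on both pinned vLLM versions. Each one compares cross-encoder scores from Hugging Face against vLLM's score API within 1% relative tolerance; a condensed sketch of the 1-to-1 case, assuming the `HfRunner`/`VllmRunner` helpers in `tests/e2e/conftest` behave like their vLLM upstream counterparts (constructor arguments and text fixtures here are illustrative):

```python
# Condensed sketch of the re-enabled 1-to-1 scoring check. Runner constructor
# arguments are assumptions, not the exact signatures in tests/e2e/conftest.
import pytest

from tests.e2e.conftest import HfRunner, VllmRunner

# Stand-ins for the module-level text fixtures in test_scoring.py.
TEXTS_1 = ["What is the capital of France?"]
TEXTS_2 = ["The capital of France is Paris."]


def test_cross_encoder_score_1_to_1(model_name):
    text_pair = [TEXTS_1[0], TEXTS_2[0]]

    # Reference score from the Hugging Face cross-encoder.
    with HfRunner(model_name, dtype="half", is_cross_encoder=True) as hf_model:
        hf_outputs = hf_model.predict([text_pair]).tolist()

    # Score from vLLM for the same pair.
    with VllmRunner(model_name, dtype="half") as vllm_model:
        vllm_outputs = vllm_model.score(text_pair[0], text_pair[1])

    # The two backends must agree within 1% relative tolerance.
    assert hf_outputs[0] == pytest.approx(vllm_outputs[0], rel=0.01)
```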