Main2main upgrade vllm commit to 03 19 17:00 (#7478)
### What this PR does / why we need it?
Upgrade vllm commit to 2026.03.19.
1. Fix `socket` removed from `StatelessProcessGroup`. Upstream vLLM PR
[#36330](https://github.com/vllm-project/vllm/pull/36330) ("elastic_ep:
Fix stateless group port races") refactored `StatelessProcessGroup` and
removed the `socket: socket.socket | None` field. The socket ownership was
moved to a new `create_tcp_store()` helper instead of being stored as a
field on the dataclass.
2. Fix the `virtual_engine` parameter removed from `set_forward_context()`.
Upstream vLLM PR [#37195](https://github.com/vllm-project/vllm/pull/37195)
("[V0 Deprecation] Deprecate virtual engine") dropped this parameter.
### Does this PR introduce _any_ user-facing change?
NA
### How was this patch tested?
NA
- vLLM version: v0.17.0
- vLLM main:
8b6325758c
---------
Signed-off-by: leo-pony <nengjunma@outlook.com>
This commit is contained in:
2
.github/workflows/bot_pr_create.yaml
vendored
2
.github/workflows/bot_pr_create.yaml
vendored
@@ -37,7 +37,7 @@ jobs:
|
|||||||
steps:
|
steps:
|
||||||
- name: Get vLLM version
|
- name: Get vLLM version
|
||||||
run: |
|
run: |
|
||||||
VLLM_COMMIT=8b6325758cce5f9c36d38f2462edbd368b97a07c
|
VLLM_COMMIT=6a9cceb219fcbd6b1eb540ddfdc77ec160f0e209
|
||||||
echo "VLLM_COMMIT=https://github.com/vllm-project/vllm/commit/$VLLM_COMMIT" >> "$GITHUB_ENV"
|
echo "VLLM_COMMIT=https://github.com/vllm-project/vllm/commit/$VLLM_COMMIT" >> "$GITHUB_ENV"
|
||||||
|
|
||||||
- name: Checkout repository
|
- name: Checkout repository
|
||||||
|
|||||||
@@ -27,7 +27,7 @@ RUN apt-get update -y && \
|
|||||||
|
|
||||||
ARG VLLM_REPO=https://github.com/vllm-project/vllm.git
|
ARG VLLM_REPO=https://github.com/vllm-project/vllm.git
|
||||||
# For lint purpose, actually we need make a main2main matching.
|
# For lint purpose, actually we need make a main2main matching.
|
||||||
ARG VLLM_COMMIT=8b6325758cce5f9c36d38f2462edbd368b97a07c
|
ARG VLLM_COMMIT=6a9cceb219fcbd6b1eb540ddfdc77ec160f0e209
|
||||||
RUN git clone $VLLM_REPO /vllm-workspace/vllm && \
|
RUN git clone $VLLM_REPO /vllm-workspace/vllm && \
|
||||||
cd /vllm-workspace/vllm && \
|
cd /vllm-workspace/vllm && \
|
||||||
git checkout $VLLM_COMMIT
|
git checkout $VLLM_COMMIT
|
||||||
|
|||||||
2
.github/workflows/pr_test_full.yaml
vendored
2
.github/workflows/pr_test_full.yaml
vendored
@@ -75,7 +75,7 @@ jobs:
|
|||||||
name: e2e-full
|
name: e2e-full
|
||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
vllm_version: [8b6325758cce5f9c36d38f2462edbd368b97a07c, v0.18.0]
|
vllm_version: [6a9cceb219fcbd6b1eb540ddfdc77ec160f0e209, v0.18.0]
|
||||||
needs: [changes]
|
needs: [changes]
|
||||||
if: ${{ needs.changes.outputs.e2e_tracker == 'true' || needs.changes.outputs.e2e_tracker == true }}
|
if: ${{ needs.changes.outputs.e2e_tracker == 'true' || needs.changes.outputs.e2e_tracker == true }}
|
||||||
uses: ./.github/workflows/_e2e_test.yaml
|
uses: ./.github/workflows/_e2e_test.yaml
|
||||||
|
|||||||
6
.github/workflows/pr_test_light.yaml
vendored
6
.github/workflows/pr_test_light.yaml
vendored
@@ -41,7 +41,7 @@ jobs:
|
|||||||
lint:
|
lint:
|
||||||
uses: ./.github/workflows/_pre_commit.yml
|
uses: ./.github/workflows/_pre_commit.yml
|
||||||
with:
|
with:
|
||||||
vllm: 8b6325758cce5f9c36d38f2462edbd368b97a07c
|
vllm: 6a9cceb219fcbd6b1eb540ddfdc77ec160f0e209
|
||||||
changes:
|
changes:
|
||||||
runs-on: linux-aarch64-a2b3-0
|
runs-on: linux-aarch64-a2b3-0
|
||||||
outputs:
|
outputs:
|
||||||
@@ -90,7 +90,7 @@ jobs:
|
|||||||
if: ${{ needs.lint.result == 'success' && (needs.changes.outputs.e2e_tracker == 'true' || needs.changes.outputs.ut_tracker == 'true') }}
|
if: ${{ needs.lint.result == 'success' && (needs.changes.outputs.e2e_tracker == 'true' || needs.changes.outputs.ut_tracker == 'true') }}
|
||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
vllm_version: [8b6325758cce5f9c36d38f2462edbd368b97a07c, v0.18.0]
|
vllm_version: [6a9cceb219fcbd6b1eb540ddfdc77ec160f0e209, v0.18.0]
|
||||||
uses: ./.github/workflows/_unit_test.yaml
|
uses: ./.github/workflows/_unit_test.yaml
|
||||||
with:
|
with:
|
||||||
vllm: ${{ matrix.vllm_version }}
|
vllm: ${{ matrix.vllm_version }}
|
||||||
@@ -102,7 +102,7 @@ jobs:
|
|||||||
name: e2e-light
|
name: e2e-light
|
||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
vllm_version: [8b6325758cce5f9c36d38f2462edbd368b97a07c, v0.18.0]
|
vllm_version: [6a9cceb219fcbd6b1eb540ddfdc77ec160f0e209, v0.18.0]
|
||||||
# Note (yikun): If CI resource are limited we can split job into two chain jobs
|
# Note (yikun): If CI resource are limited we can split job into two chain jobs
|
||||||
needs: [lint, changes]
|
needs: [lint, changes]
|
||||||
# only trigger e2e test after lint passed and the change is e2e related with pull request.
|
# only trigger e2e test after lint passed and the change is e2e related with pull request.
|
||||||
|
|||||||
@@ -33,7 +33,7 @@ jobs:
|
|||||||
name: refresh codecov
|
name: refresh codecov
|
||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
vllm_version: [8b6325758cce5f9c36d38f2462edbd368b97a07c]
|
vllm_version: [6a9cceb219fcbd6b1eb540ddfdc77ec160f0e209]
|
||||||
uses: ./.github/workflows/_unit_test.yaml
|
uses: ./.github/workflows/_unit_test.yaml
|
||||||
with:
|
with:
|
||||||
vllm: ${{ matrix.vllm_version }}
|
vllm: ${{ matrix.vllm_version }}
|
||||||
|
|||||||
@@ -59,7 +59,7 @@ For main branch of vLLM Ascend, we usually make it compatible with the latest vL
|
|||||||
|
|
||||||
| vLLM Ascend | vLLM | Python | Stable CANN | PyTorch/torch_npu |
|
| vLLM Ascend | vLLM | Python | Stable CANN | PyTorch/torch_npu |
|
||||||
|-------------|--------------|------------------|-------------|--------------------|
|
|-------------|--------------|------------------|-------------|--------------------|
|
||||||
| main | 8b6325758cce5f9c36d38f2462edbd368b97a07c, v0.18.0 tag | >= 3.10, < 3.12 | 8.5.0 | 2.9.0 / 2.9.0 |
|
| main | 6a9cceb219fcbd6b1eb540ddfdc77ec160f0e209, v0.18.0 tag | >= 3.10, < 3.12 | 8.5.0 | 2.9.0 / 2.9.0 |
|
||||||
|
|
||||||
## Release cadence
|
## Release cadence
|
||||||
|
|
||||||
|
|||||||
@@ -6,6 +6,7 @@ from vllm.distributed.utils import StatelessProcessGroup
|
|||||||
from tests.ut.base import TestBase
|
from tests.ut.base import TestBase
|
||||||
from vllm_ascend.distributed.device_communicators.pyhccl import \
|
from vllm_ascend.distributed.device_communicators.pyhccl import \
|
||||||
PyHcclCommunicator
|
PyHcclCommunicator
|
||||||
|
from vllm_ascend.utils import ACL_FORMAT_FRACTAL_NZ, vllm_version_is
|
||||||
|
|
||||||
|
|
||||||
class MockHcclLib:
|
class MockHcclLib:
|
||||||
@@ -45,10 +46,15 @@ class TestPyHcclCommunicator(TestBase):
|
|||||||
@patch("vllm_ascend.utils.current_stream",
|
@patch("vllm_ascend.utils.current_stream",
|
||||||
return_value=MagicMock(npu_stream=5678))
|
return_value=MagicMock(npu_stream=5678))
|
||||||
def test_stateless_group(self, *_):
|
def test_stateless_group(self, *_):
|
||||||
|
if vllm_version_is("0.18.0"):
|
||||||
group = StatelessProcessGroup(rank=3,
|
group = StatelessProcessGroup(rank=3,
|
||||||
world_size=4,
|
world_size=4,
|
||||||
store=None,
|
store=None,
|
||||||
socket=None)
|
socket=None)
|
||||||
|
else:
|
||||||
|
group = StatelessProcessGroup(rank=3,
|
||||||
|
world_size=4,
|
||||||
|
store=None)
|
||||||
|
|
||||||
comm = PyHcclCommunicator(group=group, device=3)
|
comm = PyHcclCommunicator(group=group, device=3)
|
||||||
|
|
||||||
|
|||||||
@@ -19,6 +19,7 @@ from vllm_ascend.utils import (
|
|||||||
is_drafter_moe_model,
|
is_drafter_moe_model,
|
||||||
is_moe_model,
|
is_moe_model,
|
||||||
speculative_enable_dispatch_gmm_combine_decode,
|
speculative_enable_dispatch_gmm_combine_decode,
|
||||||
|
vllm_version_is,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@@ -53,13 +54,14 @@ def set_ascend_forward_context(
|
|||||||
forward_context_kwargs = {
|
forward_context_kwargs = {
|
||||||
"attn_metadata": attn_metadata,
|
"attn_metadata": attn_metadata,
|
||||||
"vllm_config": vllm_config,
|
"vllm_config": vllm_config,
|
||||||
"virtual_engine": virtual_engine,
|
|
||||||
"num_tokens": num_tokens,
|
"num_tokens": num_tokens,
|
||||||
"num_tokens_across_dp": num_tokens_across_dp,
|
"num_tokens_across_dp": num_tokens_across_dp,
|
||||||
"cudagraph_runtime_mode": aclgraph_runtime_mode,
|
"cudagraph_runtime_mode": aclgraph_runtime_mode,
|
||||||
"batch_descriptor": batch_descriptor,
|
"batch_descriptor": batch_descriptor,
|
||||||
"skip_compiled": skip_compiled,
|
"skip_compiled": skip_compiled,
|
||||||
}
|
}
|
||||||
|
if vllm_version_is("0.18.0"):
|
||||||
|
forward_context_kwargs["virtual_engine"] = virtual_engine
|
||||||
|
|
||||||
with set_forward_context(**forward_context_kwargs):
|
with set_forward_context(**forward_context_kwargs):
|
||||||
forward_context = get_forward_context()
|
forward_context = get_forward_context()
|
||||||
|
|||||||
@@ -33,7 +33,7 @@ from vllm.v1.attention.backend import AttentionMetadata # type: ignore
|
|||||||
|
|
||||||
from vllm_ascend.ascend_config import get_ascend_config
|
from vllm_ascend.ascend_config import get_ascend_config
|
||||||
from vllm_ascend.ascend_forward_context import _EXTRA_CTX
|
from vllm_ascend.ascend_forward_context import _EXTRA_CTX
|
||||||
from vllm_ascend.utils import is_vl_model, parse_layer_idx
|
from vllm_ascend.utils import is_vl_model, parse_layer_idx, vllm_version_is
|
||||||
|
|
||||||
|
|
||||||
class IndexerWrapper(nn.Module):
|
class IndexerWrapper(nn.Module):
|
||||||
@@ -183,7 +183,7 @@ def mla_forward(
|
|||||||
attn_metadata = forward_context.attn_metadata[self.mla_attn.layer_name]
|
attn_metadata = forward_context.attn_metadata[self.mla_attn.layer_name]
|
||||||
else:
|
else:
|
||||||
attn_metadata = forward_context.attn_metadata
|
attn_metadata = forward_context.attn_metadata
|
||||||
kv_cache = self.mla_attn.kv_cache[forward_context.virtual_engine]
|
kv_cache = self.mla_attn.kv_cache[forward_context.virtual_engine if vllm_version_is("0.18.0") else 0]
|
||||||
self.mla_attn.impl.forward(
|
self.mla_attn.impl.forward(
|
||||||
self.mla_attn.layer_name, hidden_states, kv_cache, attn_metadata, need_gather_q_kv, output
|
self.mla_attn.layer_name, hidden_states, kv_cache, attn_metadata, need_gather_q_kv, output
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -29,7 +29,7 @@ from vllm.v1.attention.backends.utils import PAD_SLOT_ID
|
|||||||
from vllm_ascend.attention.utils import maybe_save_kv_layer_to_connector
|
from vllm_ascend.attention.utils import maybe_save_kv_layer_to_connector
|
||||||
from vllm_ascend.ops.triton.fla.sigmoid_gating import fused_sigmoid_gating_delta_rule_update
|
from vllm_ascend.ops.triton.fla.sigmoid_gating import fused_sigmoid_gating_delta_rule_update
|
||||||
from vllm_ascend.ops.triton.fused_gdn_gating import fused_gdn_gating_patch
|
from vllm_ascend.ops.triton.fused_gdn_gating import fused_gdn_gating_patch
|
||||||
from vllm_ascend.utils import enable_sp
|
from vllm_ascend.utils import enable_sp, vllm_version_is
|
||||||
|
|
||||||
|
|
||||||
class AscendQwen3_5GatedDeltaNet(Qwen3_5GatedDeltaNet):
|
class AscendQwen3_5GatedDeltaNet(Qwen3_5GatedDeltaNet):
|
||||||
@@ -66,7 +66,7 @@ class AscendQwen3_5GatedDeltaNet(Qwen3_5GatedDeltaNet):
|
|||||||
non_spec_token_indx = attn_metadata.non_spec_token_indx
|
non_spec_token_indx = attn_metadata.non_spec_token_indx
|
||||||
spec_state_indices_tensor = attn_metadata.spec_state_indices_tensor # noqa: E501
|
spec_state_indices_tensor = attn_metadata.spec_state_indices_tensor # noqa: E501
|
||||||
non_spec_state_indices_tensor = attn_metadata.non_spec_state_indices_tensor # noqa: E501
|
non_spec_state_indices_tensor = attn_metadata.non_spec_state_indices_tensor # noqa: E501
|
||||||
self_kv_cache = self.kv_cache[forward_context.virtual_engine]
|
self_kv_cache = self.kv_cache[forward_context.virtual_engine if vllm_version_is("0.18.0") else 0]
|
||||||
conv_state = self_kv_cache[0].transpose(-1, -2)
|
conv_state = self_kv_cache[0].transpose(-1, -2)
|
||||||
ssm_state = self_kv_cache[1]
|
ssm_state = self_kv_cache[1]
|
||||||
num_actual_tokens = attn_metadata.num_actual_tokens
|
num_actual_tokens = attn_metadata.num_actual_tokens
|
||||||
|
|||||||
@@ -32,7 +32,7 @@ from vllm.v1.attention.backends.utils import PAD_SLOT_ID
|
|||||||
from vllm_ascend.attention.utils import maybe_save_kv_layer_to_connector
|
from vllm_ascend.attention.utils import maybe_save_kv_layer_to_connector
|
||||||
from vllm_ascend.ops.triton.fla.fused_qkvzba_split_reshape import fused_qkvzba_split_reshape_cat
|
from vllm_ascend.ops.triton.fla.fused_qkvzba_split_reshape import fused_qkvzba_split_reshape_cat
|
||||||
from vllm_ascend.ops.triton.fused_gdn_gating import fused_gdn_gating_patch
|
from vllm_ascend.ops.triton.fused_gdn_gating import fused_gdn_gating_patch
|
||||||
from vllm_ascend.utils import enable_sp
|
from vllm_ascend.utils import enable_sp, vllm_version_is
|
||||||
|
|
||||||
|
|
||||||
class AscendQwen3Next_GatedDeltaNet(Qwen3NextGatedDeltaNet):
|
class AscendQwen3Next_GatedDeltaNet(Qwen3NextGatedDeltaNet):
|
||||||
@@ -124,7 +124,7 @@ class AscendQwen3Next_GatedDeltaNet(Qwen3NextGatedDeltaNet):
|
|||||||
non_spec_token_indx = attn_metadata.non_spec_token_indx
|
non_spec_token_indx = attn_metadata.non_spec_token_indx
|
||||||
spec_state_indices_tensor = attn_metadata.spec_state_indices_tensor # noqa: E501
|
spec_state_indices_tensor = attn_metadata.spec_state_indices_tensor # noqa: E501
|
||||||
non_spec_state_indices_tensor = attn_metadata.non_spec_state_indices_tensor # noqa: E501
|
non_spec_state_indices_tensor = attn_metadata.non_spec_state_indices_tensor # noqa: E501
|
||||||
self_kv_cache = self.kv_cache[forward_context.virtual_engine]
|
self_kv_cache = self.kv_cache[forward_context.virtual_engine if vllm_version_is("0.18.0") else 0]
|
||||||
conv_state = self_kv_cache[0].transpose(-1, -2)
|
conv_state = self_kv_cache[0].transpose(-1, -2)
|
||||||
ssm_state = self_kv_cache[1]
|
ssm_state = self_kv_cache[1]
|
||||||
num_actual_tokens = attn_metadata.num_actual_tokens
|
num_actual_tokens = attn_metadata.num_actual_tokens
|
||||||
|
|||||||
@@ -551,7 +551,7 @@ class NPUPlatform(Platform):
|
|||||||
attn_metadata: dict[str, Any],
|
attn_metadata: dict[str, Any],
|
||||||
vllm_config: VllmConfig,
|
vllm_config: VllmConfig,
|
||||||
dp_metadata,
|
dp_metadata,
|
||||||
virtual_engine: int = 0,
|
virtual_engine: int = 0, # ToDo:: Remove me when upgrade to vllm 0.19.0 from 0.18.0
|
||||||
num_tokens: int = 0,
|
num_tokens: int = 0,
|
||||||
num_tokens_across_dp: torch.Tensor | None = None,
|
num_tokens_across_dp: torch.Tensor | None = None,
|
||||||
cudagraph_runtime_mode=None,
|
cudagraph_runtime_mode=None,
|
||||||
|
|||||||
Reference in New Issue
Block a user