[Main2Main] Upgrade vLLM to 0303 (#6944)

### What this PR does / why we need it?
Breaking changes:
- https://github.com/vllm-project/vllm/pull/34102
The `disable_full` param was replaced with the `valid_modes`/`invalid_modes` API (see the sketch after this list)
- https://github.com/vllm-project/vllm/pull/35503
`compile_or_warm_up_model` must now return a float `compilation_time`
- https://github.com/vllm-project/vllm/pull/35564
A new `sequence_lengths` param was added
- https://github.com/vllm-project/vllm/pull/33807
A check was added (`if runner_backend != "auto"`)
- https://github.com/vllm-project/vllm/pull/34861
`BaseDeviceCommunicator` now accesses PyTorch's internal `pg_map` to check process group state
- https://github.com/vllm-project/vllm/pull/35274
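
Of these, the cudagraph dispatcher change is the most invasive for downstream code. A minimal sketch of a version-gated bridge between the two APIs (simplified from the `NPUModelRunner` change later in this commit; `dispatch_compat` is a hypothetical helper, the `has_lora`/`uniform_decode` arguments are omitted for brevity, and the `CUDAGraphMode` import path is an assumption):

```python
from vllm.config import CUDAGraphMode  # assumed import path
from vllm_ascend.utils import vllm_version_is


def dispatch_compat(dispatcher, num_tokens: int, disable_full: bool = False):
    """Dispatch against either the old (v0.16.0) or the new API."""
    if vllm_version_is("0.16.0"):
        # Old API: a single boolean knob that forbids full-graph capture.
        return dispatcher.dispatch(num_tokens=num_tokens,
                                   disable_full=disable_full)
    # New API: explicit mode sets replace the boolean.
    return dispatcher.dispatch(
        num_tokens=num_tokens,
        invalid_modes={CUDAGraphMode.FULL} if disable_full else None,
    )
```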

**Important change:**
- https://github.com/vllm-project/vllm/pull/28672

`matcher_utils` directly accesses `torch.ops._C.*` during the import
phase. In the Ascend environment, some unregistered ops trigger
`AttributeError`, causing e2e initialization failure.

https://github.com/vllm-project/vllm-ascend/actions/runs/22607260487/job/65502047131#step:10:2323

https://github.com/vllm-project/vllm/blob/main/vllm/compilation/passes/fusion/matcher_utils.py#L29

This PR adds temporary compatibility placeholders (`rms_norm`,
`fused_add_rms_norm`, `rotary_embedding`, static/dynamic fp8 quant,
`silu_and_mul`) in
`vllm_ascend/patch/platform/patch_fusion_matcher_compat_ops.py` to
ensure the import phase does not crash. An upstream fix will be
pursued later.
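
For context, the failure is an ordinary attribute lookup on the `torch.ops._C` namespace; a minimal repro of the pre-patch behavior (assumes a build where vLLM's compiled custom ops are not registered, as on Ascend):

```python
import torch

# The fusion matcher performs lookups like this at module import time.
# When the custom op was never registered, the namespace raises
# AttributeError, which aborts the whole import chain.
torch.ops._C.rms_norm
```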

### Does this PR introduce _any_ user-facing change?

### How was this patch tested?

- vLLM version: v0.16.0
- vLLM main: 15d76f74e2

---------

Signed-off-by: MrZ20 <2609716663@qq.com>
Signed-off-by: gcanlin <canlinguosdu@gmail.com>
Co-authored-by: Meihan-chen <jcccx.cmh@gmail.com>
Co-authored-by: Claude Code <noreply@anthropic.com>
Co-authored-by: gcanlin <canlinguosdu@gmail.com>
Author: SILONG ZENG
Date: 2026-03-06 09:08:52 +08:00
Committed by: GitHub
Parent: 640ecd1b77
Commit: bd571cf6d6
15 changed files with 87 additions and 28 deletions

View File

@@ -37,7 +37,7 @@ jobs:
steps:
- name: Get vLLM version
run: |
-VLLM_COMMIT=15d76f74e2fdb12a95ea00f0ca283acf6219a2b7
+VLLM_COMMIT=4034c3d32e30d01639459edd3ab486f56993876d
echo "VLLM_COMMIT=https://github.com/vllm-project/vllm/commit/$VLLM_COMMIT" >> "$GITHUB_ENV"
- name: Checkout repository

View File

@@ -27,7 +27,7 @@ RUN apt-get update -y && \
ARG VLLM_REPO=https://github.com/vllm-project/vllm.git
# For lint purpose, actually we need make a main2main matching.
-ARG VLLM_COMMIT=15d76f74e2fdb12a95ea00f0ca283acf6219a2b7
+ARG VLLM_COMMIT=4034c3d32e30d01639459edd3ab486f56993876d
RUN git clone $VLLM_REPO /vllm-workspace/vllm && \
cd /vllm-workspace/vllm && \
git checkout $VLLM_COMMIT

View File

@@ -75,7 +75,7 @@ jobs:
name: e2e-full
strategy:
matrix:
-vllm_version: [15d76f74e2fdb12a95ea00f0ca283acf6219a2b7, v0.16.0]
+vllm_version: [4034c3d32e30d01639459edd3ab486f56993876d, v0.16.0]
needs: [changes]
if: ${{ needs.changes.outputs.e2e_tracker == 'true' || needs.changes.outputs.e2e_tracker == true }}
uses: ./.github/workflows/_e2e_test.yaml

View File

@@ -41,7 +41,7 @@ jobs:
lint:
uses: ./.github/workflows/_pre_commit.yml
with:
-vllm: 15d76f74e2fdb12a95ea00f0ca283acf6219a2b7
+vllm: 4034c3d32e30d01639459edd3ab486f56993876d
changes:
runs-on: linux-aarch64-a2b3-0
outputs:
@@ -89,7 +89,7 @@ jobs:
if: ${{ needs.lint.result == 'success' && (needs.changes.outputs.e2e_tracker == 'true' || needs.changes.outputs.ut_tracker == 'true') }}
strategy:
matrix:
-vllm_version: [15d76f74e2fdb12a95ea00f0ca283acf6219a2b7, v0.16.0]
+vllm_version: [4034c3d32e30d01639459edd3ab486f56993876d, v0.16.0]
uses: ./.github/workflows/_unit_test.yaml
with:
vllm: ${{ matrix.vllm_version }}
@@ -101,7 +101,7 @@ jobs:
name: e2e-light
strategy:
matrix:
-vllm_version: [15d76f74e2fdb12a95ea00f0ca283acf6219a2b7, v0.16.0]
+vllm_version: [4034c3d32e30d01639459edd3ab486f56993876d, v0.16.0]
# Note (yikun): If CI resource are limited we can split job into two chain jobs
needs: [lint, changes]
# only trigger e2e test after lint passed and the change is e2e related with pull request.

View File

@@ -33,7 +33,7 @@ jobs:
name: refresh codecov
strategy:
matrix:
-vllm_version: [15d76f74e2fdb12a95ea00f0ca283acf6219a2b7]
+vllm_version: [4034c3d32e30d01639459edd3ab486f56993876d]
uses: ./.github/workflows/_unit_test.yaml
with:
vllm: ${{ matrix.vllm_version }}

View File

@@ -57,7 +57,7 @@ For main branch of vLLM Ascend, we usually make it compatible with the latest vL
| vLLM Ascend | vLLM | Python | Stable CANN | PyTorch/torch_npu |
|-------------|--------------|------------------|-------------|--------------------|
-| main | 4572a06afe96d0a6d5d3efacf130c71505dd2bc9, v0.16.0 tag | >= 3.10, < 3.12 | 8.5.0 | 2.9.0 / 2.9.0 |
+| main | 4034c3d32e30d01639459edd3ab486f56993876d, v0.16.0 tag | >= 3.10, < 3.12 | 8.5.0 | 2.9.0 / 2.9.0 |
## Release cadence

View File

@@ -70,6 +70,7 @@ class TestAscendModelSlimConfig310(TestBase):
fused_moe_layer = MagicMock(spec=FusedMoE)
fused_moe_layer.moe = MagicMock(spec=FusedMoEConfig)
fused_moe_layer.moe_config = MagicMock(spec=FusedMoEConfig)
+fused_moe_layer.moe_config.moe_backend = "auto"
fused_moe_layer.moe_config.moe_parallel_config = MagicMock(spec=FusedMoEParallelConfig)
fused_moe_layer.moe_config.moe_parallel_config.use_ep = True
fused_moe_layer.moe_config.moe_parallel_config.dp_size = 1

View File

@@ -44,7 +44,8 @@ class TestNPUCommunicator(unittest.TestCase):
gather_sizes = [2, 2]
input_ = torch.tensor([10, 20, 30, 40])
-comm = NPUCommunicator(cpu_group=dist.group.WORLD)
+with patch.dict(dist.distributed_c10d._world.pg_map, {dist.group.WORLD: MagicMock()}, clear=False):
+    comm = NPUCommunicator(cpu_group=dist.group.WORLD)
output = comm.all_to_all(input_,
scatter_sizes=scatter_sizes,
@@ -84,7 +85,8 @@ class TestNPUCommunicator(unittest.TestCase):
input_ = torch.tensor([[10, 20], [30, 40]])
-comm = NPUCommunicator(cpu_group=dist.group.WORLD)
-output = comm.all_to_all(input_, scatter_dim=0, gather_dim=0)
+with patch.dict(dist.distributed_c10d._world.pg_map, {dist.group.WORLD: MagicMock()}, clear=False):
+    comm = NPUCommunicator(cpu_group=dist.group.WORLD)
+    output = comm.all_to_all(input_, scatter_dim=0, gather_dim=0)
assert output.tolist() == [[10, 20], [50, 60]]
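
These `patch.dict` wrappers are needed because, per the upstream change, the communicator now consults torch's process-group registry during construction. A hedged sketch of the kind of check involved (`group_is_registered` is illustrative; the exact upstream code may differ):

```python
import torch.distributed as dist
from torch.distributed import distributed_c10d


def group_is_registered(group: dist.ProcessGroup) -> bool:
    # pg_map records every initialized ProcessGroup, which is why the tests
    # insert a MagicMock entry for WORLD before constructing NPUCommunicator.
    return group in distributed_c10d._world.pg_map
```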

View File

@@ -22,7 +22,6 @@ import time
from collections import defaultdict
from dataclasses import dataclass, fields
-from vllm._bc_linter import bc_linter_include
from vllm.config import SchedulerConfig, VllmConfig
from vllm.distributed.ec_transfer.ec_connector.base import ECConnectorMetadata
from vllm.distributed.kv_events import KVEventBatch
@@ -73,7 +72,6 @@ class RecomputeReqInfo:
client_index: int = 0
-@bc_linter_include
@dataclass
class RecomputeSchedulerOutput(SchedulerOutput):
recomputed_reqs: list[RecomputeReqInfo] | None = None

View File

@@ -96,6 +96,7 @@ class AscendMMEncoderAttention(MMEncoderAttention):
value: torch.Tensor,
cu_seqlens: torch.Tensor | None = None,
max_seqlen: torch.Tensor | None = None, # Only used for Flash Attention
+sequence_lengths: torch.Tensor | None = None,
):
bsz, q_len = query.size()[:2]
kv_len = key.size(1)

View File

@@ -94,6 +94,20 @@
# Future Plan:
# Remove this patch when vLLM merge the PR.
#
+# ** 6. File: platform/patch_fusion_matcher_compat_ops.py**
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# 1. `torch.ops._C.rms_norm`, `torch.ops._C.fused_add_rms_norm`,
+# Why:
+# Upstream vLLM initializes the fusion matcher's global operators at import time.
+# In the Ascend environment these symbols may be absent, causing import failure.
+# How:
+# Inject placeholders only when the symbols are missing so the import can continue.
+# Related PR (if no, explain why):
+# Temporary compatibility patch until the upstream adjustment is merged.
+# Future Plan:
+# Remove this patch once upstream no longer requires these global symbols or
+# provides a backend-safe initialization path.
+#
# * Worker Patch:
# ===============
#

View File

@@ -17,6 +17,7 @@
import os
import vllm_ascend.patch.platform.patch_distributed # noqa
+import vllm_ascend.patch.platform.patch_fusion_matcher_compat_ops # noqa
import vllm_ascend.patch.platform.patch_mamba_config # noqa
import vllm_ascend.patch.platform.patch_sched_yield # noqa

View File

@@ -0,0 +1,24 @@
+import torch
+
+
+class _MissingOp:
+    def __init__(self, op_name: str):
+        self.op_name = op_name
+        self.default = self
+
+    def __call__(self, *args, **kwargs):
+        raise RuntimeError(f"Missing upstream op `{self.op_name}` was invoked.")
+
+
+def _set_missing(namespace, op_name: str, full_name: str) -> None:
+    if not hasattr(namespace, op_name):
+        setattr(namespace, op_name, _MissingOp(full_name))
+
+
+_set_missing(torch.ops._C, "rms_norm", "torch.ops._C.rms_norm")
+_set_missing(torch.ops._C, "fused_add_rms_norm", "torch.ops._C.fused_add_rms_norm")
+_set_missing(torch.ops._C, "rotary_embedding", "torch.ops._C.rotary_embedding")
+_set_missing(torch.ops._C, "static_scaled_fp8_quant", "torch.ops._C.static_scaled_fp8_quant")
+_set_missing(torch.ops._C, "dynamic_scaled_fp8_quant", "torch.ops._C.dynamic_scaled_fp8_quant")
+_set_missing(torch.ops._C, "dynamic_per_token_scaled_fp8_quant", "torch.ops._C.dynamic_per_token_scaled_fp8_quant")
+_set_missing(torch.ops._C, "silu_and_mul", "torch.ops._C.silu_and_mul")

View File

@@ -120,6 +120,7 @@ from vllm_ascend.utils import (
is_moe_model,
lmhead_tp_enable,
set_weight_prefetch_method,
+vllm_version_is,
)
from vllm_ascend.worker.npu_input_batch import NPUInputBatch
from vllm_ascend.worker.pcp_utils import PCPManager
@@ -1826,16 +1827,26 @@ class NPUModelRunner(GPUModelRunner):
has_lora = len(self.input_batch.lora_id_to_lora_request) > 0 if force_has_lora is None else force_has_lora
-# ruff: noqa: E731
-dispatch_cudagraph = (
-    lambda num_tokens, disable_full: self.cudagraph_dispatcher.dispatch(
-        num_tokens=num_tokens,
-        has_lora=has_lora,
-        uniform_decode=uniform_decode,
-        disable_full=disable_full,
-    )
-    if not force_eager
-    else (CUDAGraphMode.NONE, BatchDescriptor(num_tokens_padded))
-)
+def dispatch_cudagraph(num_tokens, disable_full=False, valid_modes=None):
+    if force_eager:
+        return (CUDAGraphMode.NONE, BatchDescriptor(num_tokens_padded))
+    if vllm_version_is("0.16.0"):
+        return self.cudagraph_dispatcher.dispatch(
+            num_tokens=num_tokens,
+            has_lora=has_lora,
+            uniform_decode=uniform_decode,
+            disable_full=disable_full,
+        )
+    else:
+        return self.cudagraph_dispatcher.dispatch(
+            num_tokens=num_tokens,
+            has_lora=has_lora,
+            uniform_decode=uniform_decode,
+            valid_modes=valid_modes,
+            invalid_modes={CUDAGraphMode.FULL} if disable_full else None,
+        )
cudagraph_mode, batch_descriptor = dispatch_cudagraph(num_tokens_padded, use_cascade_attn or has_encoder_output)
num_tokens_padded = batch_descriptor.num_tokens
if enable_sp(self.vllm_config):
@@ -1856,10 +1867,16 @@ class NPUModelRunner(GPUModelRunner):
dp_rank = self.parallel_config.data_parallel_rank
num_tokens_padded = int(num_tokens_across_dp[dp_rank].item())
# Re-dispatch with DP padding
-cudagraph_mode, batch_descriptor = dispatch_cudagraph(
-    num_tokens_padded,
-    disable_full=synced_cudagraph_mode <= CUDAGraphMode.PIECEWISE.value,
-)
+if vllm_version_is("0.16.0"):
+    cudagraph_mode, batch_descriptor = dispatch_cudagraph(
+        num_tokens_padded,
+        disable_full=synced_cudagraph_mode <= CUDAGraphMode.PIECEWISE.value,
+    )
+else:
+    cudagraph_mode, batch_descriptor = dispatch_cudagraph(
+        num_tokens_padded,
+        valid_modes={CUDAGraphMode(synced_cudagraph_mode)},
+    )
# Assert to make sure the agreed upon token count is correct otherwise
# num_tokens_across_dp will no-longer be valid
assert batch_descriptor.num_tokens == num_tokens_padded

View File

@@ -430,7 +430,7 @@ class NPUWorker(WorkerBase):
with context, set_current_vllm_config(self.vllm_config):
self.model_runner.load_model()
-def compile_or_warm_up_model(self) -> None:
+def compile_or_warm_up_model(self) -> float:
# Note: need to adapt for graph mode.
warmup_sizes = (self.vllm_config.compilation_config.compile_sizes or []).copy()
if not self.model_config.enforce_eager:
@@ -462,6 +462,7 @@ class NPUWorker(WorkerBase):
# Reset the seed to ensure that the random state is not affected by
# the model initialization and profiling.
set_random_seed(self.model_config.seed)
+return self.vllm_config.compilation_config.compilation_time
def _warm_up_atb(self):
x = torch.rand((2, 4), dtype=torch.float16).npu()
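
With the new contract, callers can read back the measured compilation time; a minimal usage sketch (assumes a fully initialized `NPUWorker` instance named `worker`):

```python
# `worker` is assumed to be an initialized NPUWorker.
elapsed: float = worker.compile_or_warm_up_model()
print(f"compile/warm-up finished in {elapsed:.2f}s")
```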