[Main2Main] Upgrade vLLM to 0303 (#6944)

### What this PR does / why we need it?
Breaking changes:
- https://github.com/vllm-project/vllm/pull/34102
The `disable_full` param was replaced with the `valid_modes`/`invalid_modes` API (adapted as sketched after this list).
- https://github.com/vllm-project/vllm/pull/35503
`compile_or_warm_up_model` must now return a float `compilation_time`.
- https://github.com/vllm-project/vllm/pull/35564
A new `sequence_lengths` param was added.
- https://github.com/vllm-project/vllm/pull/33807
A check on `runner_backend != "auto"` was added.
- https://github.com/vllm-project/vllm/pull/34861
`BaseDeviceCommunicator` now accesses PyTorch's internal `pg_map` to
check process group state (see the second sketch after this list).
- https://github.com/vllm-project/vllm/pull/35274
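
For the `disable_full` removal, the call sites in this PR are version-gated on `vllm_version_is("0.16.0")`. Below is a minimal standalone sketch of that adaptation; `dispatcher` is an illustrative stand-in name, and the real change is in the model-runner diff further down:

```python
# Minimal sketch of migrating from the removed `disable_full` flag to the new
# valid_modes/invalid_modes API. `dispatcher` is an illustrative stand-in for
# the runner's cudagraph_dispatcher; the actual change is in the diff below.
from vllm.config import CUDAGraphMode
from vllm_ascend.utils import vllm_version_is


def dispatch(dispatcher, num_tokens: int, disable_full: bool = False, valid_modes=None):
    if vllm_version_is("0.16.0"):
        # Old API: a single boolean that forbids FULL cudagraphs.
        return dispatcher.dispatch(num_tokens=num_tokens, disable_full=disable_full)
    # New API: the same intent is expressed as sets of allowed/forbidden modes.
    return dispatcher.dispatch(
        num_tokens=num_tokens,
        valid_modes=valid_modes,
        invalid_modes={CUDAGraphMode.FULL} if disable_full else None,
    )
```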
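
For the `pg_map` change, the sketch below shows the kind of internal lookup involved; it is an assumption about PyTorch's `torch.distributed` internals, not code taken from the upstream PR:

```python
# Hypothetical sketch: check whether a process group is still registered in
# PyTorch's internal registry, which is what the upstream BaseDeviceCommunicator
# change relies on. `_world.pg_map` is a private torch.distributed detail.
import torch.distributed as dist
from torch.distributed.distributed_c10d import _world


def pg_is_registered(pg: dist.ProcessGroup) -> bool:
    # Absence from pg_map means the group was never created or was destroyed.
    return pg in _world.pg_map
```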

**Important change:**
- https://github.com/vllm-project/vllm/pull/28672

`matcher_utils` directly accesses `torch.ops._C.*` during the import
phase. In the Ascend environment some of these ops are not registered, so
the attribute lookup raises `AttributeError` and e2e initialization fails.

https://github.com/vllm-project/vllm-ascend/actions/runs/22607260487/job/65502047131#step:10:2323

https://github.com/vllm-project/vllm/blob/main/vllm/compilation/passes/fusion/matcher_utils.py#L29

This PR adds temporary compatibility placeholders (`rms_norm`,
`fused_add_rms_norm`, `rotary_embedding`, static/dynamic fp8 quant,
`silu_and_mul`) to
`vllm_ascend/patch/platform/patch_fusion_matcher_compat_ops.py` so that
the import phase no longer crashes (see the sketch below). A proper
upstream fix will be considered later.
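
A rough sketch of the placeholder approach, assuming `torch.library`'s fragment mechanism; the actual patch lives in `vllm_ascend/patch/platform/patch_fusion_matcher_compat_ops.py` and may differ in names and schemas:

```python
# Hypothetical sketch of the compatibility placeholders: define schema-only
# ops in the "_C" namespace so that `torch.ops._C.<name>` resolves at import
# time on Ascend, where the CUDA custom ops are not registered.
import torch
from torch.library import Library

# FRAGMENT lets us add ops to the (possibly already existing) "_C" namespace.
_compat_lib = Library("_C", "FRAGMENT")


def _register_placeholder(name: str, schema: str) -> None:
    # Skip ops that a real backend has already registered.
    if hasattr(torch.ops._C, name):
        return
    _compat_lib.define(f"{name}{schema}")


# Schemas below mirror vLLM's CUDA ops; they are placeholders only, with no
# kernel implementation behind them.
_register_placeholder(
    "rms_norm", "(Tensor! result, Tensor input, Tensor weight, float epsilon) -> ()"
)
_register_placeholder(
    "silu_and_mul", "(Tensor! result, Tensor input) -> ()"
)
```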

### Does this PR introduce _any_ user-facing change?

### How was this patch tested?

- vLLM version: v0.16.0
- vLLM main: 15d76f74e2

---------

Signed-off-by: MrZ20 <2609716663@qq.com>
Signed-off-by: gcanlin <canlinguosdu@gmail.com>
Co-authored-by: Meihan-chen <jcccx.cmh@gmail.com>
Co-authored-by: Claude Code <noreply@anthropic.com>
Co-authored-by: gcanlin <canlinguosdu@gmail.com>
Commit bd571cf6d6 (parent 640ecd1b77), authored by SILONG ZENG on 2026-03-06 09:08:52 +08:00, committed by GitHub.
15 changed files with 87 additions and 28 deletions.


@@ -120,6 +120,7 @@ from vllm_ascend.utils import (
     is_moe_model,
     lmhead_tp_enable,
     set_weight_prefetch_method,
+    vllm_version_is,
 )
 from vllm_ascend.worker.npu_input_batch import NPUInputBatch
 from vllm_ascend.worker.pcp_utils import PCPManager
@@ -1826,16 +1827,26 @@ class NPUModelRunner(GPUModelRunner):
         has_lora = len(self.input_batch.lora_id_to_lora_request) > 0 if force_has_lora is None else force_has_lora
-        # ruff: noqa: E731
-        dispatch_cudagraph = (
-            lambda num_tokens, disable_full: self.cudagraph_dispatcher.dispatch(
-                num_tokens=num_tokens,
-                has_lora=has_lora,
-                uniform_decode=uniform_decode,
-                disable_full=disable_full,
-            )
-            if not force_eager
-            else (CUDAGraphMode.NONE, BatchDescriptor(num_tokens_padded))
-        )
+        def dispatch_cudagraph(num_tokens, disable_full=False, valid_modes=None):
+            if force_eager:
+                return (CUDAGraphMode.NONE, BatchDescriptor(num_tokens_padded))
+            if vllm_version_is("0.16.0"):
+                return self.cudagraph_dispatcher.dispatch(
+                    num_tokens=num_tokens,
+                    has_lora=has_lora,
+                    uniform_decode=uniform_decode,
+                    disable_full=disable_full,
+                )
+            else:
+                return self.cudagraph_dispatcher.dispatch(
+                    num_tokens=num_tokens,
+                    has_lora=has_lora,
+                    uniform_decode=uniform_decode,
+                    valid_modes=valid_modes,
+                    invalid_modes={CUDAGraphMode.FULL} if disable_full else None,
+                )
         cudagraph_mode, batch_descriptor = dispatch_cudagraph(num_tokens_padded, use_cascade_attn or has_encoder_output)
         num_tokens_padded = batch_descriptor.num_tokens
         if enable_sp(self.vllm_config):
@@ -1856,10 +1867,16 @@ class NPUModelRunner(GPUModelRunner):
             dp_rank = self.parallel_config.data_parallel_rank
             num_tokens_padded = int(num_tokens_across_dp[dp_rank].item())
             # Re-dispatch with DP padding
-            cudagraph_mode, batch_descriptor = dispatch_cudagraph(
-                num_tokens_padded,
-                disable_full=synced_cudagraph_mode <= CUDAGraphMode.PIECEWISE.value,
-            )
+            if vllm_version_is("0.16.0"):
+                cudagraph_mode, batch_descriptor = dispatch_cudagraph(
+                    num_tokens_padded,
+                    disable_full=synced_cudagraph_mode <= CUDAGraphMode.PIECEWISE.value,
+                )
+            else:
+                cudagraph_mode, batch_descriptor = dispatch_cudagraph(
+                    num_tokens_padded,
+                    valid_modes={CUDAGraphMode(synced_cudagraph_mode)},
+                )
         # Assert to make sure the agreed upon token count is correct otherwise
         # num_tokens_across_dp will no-longer be valid
         assert batch_descriptor.num_tokens == num_tokens_padded


@@ -430,7 +430,7 @@ class NPUWorker(WorkerBase):
         with context, set_current_vllm_config(self.vllm_config):
            self.model_runner.load_model()

-    def compile_or_warm_up_model(self) -> None:
+    def compile_or_warm_up_model(self) -> float:
         # Note: need to adapt for graph mode.
         warmup_sizes = (self.vllm_config.compilation_config.compile_sizes or []).copy()
         if not self.model_config.enforce_eager:
@@ -462,6 +462,7 @@ class NPUWorker(WorkerBase):
         # Reset the seed to ensure that the random state is not affected by
         # the model initialization and profiling.
         set_random_seed(self.model_config.seed)
+        return self.vllm_config.compilation_config.compilation_time

     def _warm_up_atb(self):
         x = torch.rand((2, 4), dtype=torch.float16).npu()