[main2main] upgrade vllm main 0202 (#6560)
### What this PR does / why we need it?
1. Fix `TypeError: FusedMoEParallelConfig.__init__() missing 1 required positional argument: 'is_sequence_parallel'` due to https://github.com/vllm-project/vllm/pull/32567
2. Fix `TypeError: '>' not supported between instances of 'MagicMock' and 'int'` due to https://github.com/vllm-project/vllm/pull/33035
3. Fix `TypeError: Can't instantiate abstract class AscendMLAImpl with abstract methods forward_mha, forward_mqa` and `AttributeError: 'bool' object has no attribute 'process_weights_after_loading'` due to https://github.com/vllm-project/vllm/pull/33284
4. Fix `'AscendSharedFusedMoE' object has no attribute '_routed_input_transform'` due to https://github.com/vllm-project/vllm/pull/32790
5. Fix `NPUModelRunner._dummy_run() got an unexpected keyword argument 'num_active_loras'` due to https://github.com/vllm-project/vllm/pull/32005
6. Fix the problem caused by `'tuple' object has no attribute 'job_id'` due to https://github.com/vllm-project/vllm/pull/27492
7. Fix the problem that `all_moe_layers` does not match the `vllm.moe_forward` / `vllm.moe_forward_shared` registration due to https://github.com/vllm-project/vllm/pull/33184
8. Add a patch to fix the problem "got multiple values for keyword argument 'add_special_tokens'" due to https://github.com/vllm-project/vllm/pull/32863

### Does this PR introduce _any_ user-facing change?

### How was this patch tested?
- vLLM version: v0.15.0
- vLLM main: https://github.com/vllm-project/vllm/commit/v0.15.0

---------

Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
Signed-off-by: Meihan-chen <jcccx.cmh@gmail.com>
Signed-off-by: hfadzxy <starmoon_zhang@163.com>
Co-authored-by: wangxiyuan <wangxiyuan1007@gmail.com>
Co-authored-by: hfadzxy <starmoon_zhang@163.com>
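Most of the fixes above come down to vLLM main moving `Attention`/`MLAAttention` out of `vllm.attention.layer`. A minimal sketch of the version-gated import pattern this PR applies in each affected module is shown below (names taken directly from the diff; the fallback path only exists on newer vLLM main):

```python
# Sketch of the compatibility-import pattern used throughout this PR.
# Assumes vllm_ascend.utils.vllm_version_is compares against the installed vLLM release.
from vllm_ascend.utils import vllm_version_is

if vllm_version_is("v0.15.0"):
    # The released v0.15.0 still exposes the layer classes here.
    from vllm.attention.layer import Attention, MLAAttention  # type: ignore
else:
    # Newer vLLM main moved them; see the hunks below.
    from vllm.model_executor.layers.attention import Attention, MLAAttention
```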
@@ -1450,6 +1450,28 @@ class AscendMLAImpl(MLAAttentionImpl):
    def get_num_actual_tokens(self, attn_metadata: M):
        return attn_metadata.num_actual_tokens

    def forward_mha(
        self,
        layer_name: str,
        hidden_states: torch.Tensor,
        kv_cache: tuple[torch.Tensor],
        attn_metadata: M,
        need_gather_q_kv: bool = False,
        output: torch.Tensor | None = None,
    ) -> torch.Tensor:
        raise NotImplementedError("forward_mha is not supported for MLA attention. Use forward() instead.")

    def forward_mqa(
        self,
        layer_name: str,
        hidden_states: torch.Tensor,
        kv_cache: tuple[torch.Tensor],
        attn_metadata: M,
        need_gather_q_kv: bool = False,
        output: torch.Tensor | None = None,
    ) -> torch.Tensor:
        raise NotImplementedError("forward_mqa is not supported for MLA attention. Use forward() instead.")

    def forward(
        self,
        layer_name,
@@ -1062,3 +1062,24 @@ class AscendSFAImpl(MLAAttentionImpl):
        torch.distributed.all_to_all_single(attn_output, send, group=get_tp_group().device_group)

        return attn_output, True

    def forward_mha(
        self,
        q: torch.Tensor,
        kv_c_normed: torch.Tensor,
        k_pe: torch.Tensor,
        kv_c_and_k_pe_cache: torch.Tensor,
        attn_metadata: M,
        k_scale: torch.Tensor,
        output: torch.Tensor,
    ) -> None:
        raise NotImplementedError("forward_mha is not supported for SFA attention. Use forward() instead.")

    def forward_mqa(
        self,
        q: torch.Tensor | tuple[torch.Tensor, torch.Tensor],
        kv_c_and_k_pe_cache: torch.Tensor,
        attn_metadata: M,
        layer,
    ) -> tuple[torch.Tensor, torch.Tensor | None]:
        raise NotImplementedError("forward_mqa is not supported for SFA attention. Use forward() instead.")
@@ -18,7 +18,6 @@

import torch
import torchair
from vllm.attention.layer import Attention
from vllm.config import VllmConfig, get_layers_from_vllm_config
from vllm.config.compilation import Range
from vllm.logger import logger
@@ -27,6 +26,12 @@ from vllm_ascend.compilation.npugraph_ex_passes.utils.npugraph_ex_utils_check im
    check_and_register_fusion_pass,
    extra_stream_scope_check,
)
from vllm_ascend.utils import vllm_version_is

if vllm_version_is("v0.15.0"):
    from vllm.attention.layer import Attention  # type: ignore
else:
    from vllm.model_executor.layers.attention import Attention


class GraphEXQKNormRopeFusionPattern:
@@ -18,12 +18,18 @@
import torch
import torch._inductor.pattern_matcher as pm
from torch._inductor.pattern_matcher import PatternMatcherPass, PatternPrettyPrinter
from vllm.attention.layer import Attention
from vllm.compilation.vllm_inductor_pass import VllmInductorPass
from vllm.config import VllmConfig, get_layers_from_vllm_config
from vllm.config.compilation import Range
from vllm.logger import logger

from vllm_ascend.utils import vllm_version_is

if vllm_version_is("v0.15.0"):
    from vllm.attention.layer import Attention  # type: ignore
else:
    from vllm.model_executor.layers.attention import Attention


class QKNormRopeFusionPattern:
    def __init__(self, vllm_config, head_dim, num_heads, num_kv_heads, eps=1e-6):
@@ -10,7 +10,6 @@ from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Optional

import torch
from vllm.attention.layer import Attention, MLAAttention
from vllm.config import VllmConfig, get_layers_from_vllm_config
from vllm.distributed.ec_transfer import get_ec_transfer, has_ec_transfer
from vllm.distributed.kv_transfer.kv_connector.v1.base import KVConnectorBase_V1, KVConnectorMetadata, KVConnectorRole
@@ -27,6 +26,7 @@ from vllm_ascend.distributed.kv_transfer.kv_pool.cpu_offload.metadata import (
    MetadataServerProc,
    MLAConfig,
)
from vllm_ascend.utils import vllm_version_is

if TYPE_CHECKING:
    from vllm.forward_context import ForwardContext
@@ -35,6 +35,11 @@ if TYPE_CHECKING:
    from vllm.v1.kv_cache_interface import KVCacheConfig
    from vllm.v1.request import Request

if vllm_version_is("v0.15.0"):
    from vllm.attention.layer import Attention, MLAAttention  # type: ignore
else:
    from vllm.model_executor.layers.attention import Attention, MLAAttention


@dataclass
class ReqMeta:
@@ -6,6 +6,8 @@ from vllm.v1.attention.backend import AttentionBackend # type: ignore
from vllm.v1.kv_offload.mediums import CPULoadStoreSpec, GPULoadStoreSpec
from vllm.v1.kv_offload.worker.worker import OffloadingHandler, TransferResult, TransferSpec

from vllm_ascend.utils import vllm_version_is

logger = init_logger(__name__)
@@ -153,12 +155,30 @@ class CpuNpuOffloadingHandler(OffloadingHandler):
    def get_finished(self) -> list[TransferResult]:
        results: list[TransferResult] = []
        for job_id, event in self.transfer_events.items():
            if event.query():
                results.append((job_id, True))
                self.events_pool.append(event)
        for job_id, _ in results:
            del self.transfer_events[job_id]
        if vllm_version_is("v0.15.0"):
            for job_id, event in self.transfer_events.items():
                if event.query():
                    results.append((job_id, True))
                    self.events_pool.append(event)
            for job_id, _ in results:
                del self.transfer_events[job_id]
        else:
            finished_job_ids = []
            for job_id, event in self.transfer_events.items():
                if event.query():
                    results.append(
                        TransferResult(
                            job_id=job_id,
                            success=True,
                            transfer_size=None,
                            transfer_time=None,
                            transfer_type=None,
                        )
                    )
                    finished_job_ids.append(job_id)
                    self.events_pool.append(event)
            for job_id in finished_job_ids:
                del self.transfer_events[job_id]
        return results

    def wait(self, job_ids: set[int]) -> None:
@@ -46,7 +46,8 @@ from vllm_ascend.ops.fused_moe.prepare_finalize import QuantType
from vllm_ascend.utils import (AscendDeviceType, enable_sp,
                               get_ascend_device_type, maybe_trans_nz,
                               npu_stream_switch, shared_expert_dp_enabled,
                               shared_experts_calculation_stream)
                               shared_experts_calculation_stream,
                               vllm_version_is)

@dataclass
class FusedMoEResult:
@@ -407,10 +408,13 @@ class AscendSharedFusedMoE(SharedFusedMoE, AscendFusedMoE):
        shared_experts: torch.nn.Module,
        gate: Optional[torch.nn.Module] = None,
        use_overlapped: bool = True,
        routed_input_transform: Optional[torch.nn.Module] = None,
        **kwargs,
    ):
        AscendFusedMoE.__init__(self, **kwargs)

        if not vllm_version_is("0.15.0"):
            self._routed_input_transform = routed_input_transform
        self._shared_experts = shared_experts
        self.use_overlapped = use_overlapped
        self.shared_expert_stream = None
@@ -23,7 +23,6 @@ from typing import Optional

import torch
from torch import nn
from vllm.attention.layer import MLAAttention
from vllm.config import CacheConfig, get_current_vllm_config
from vllm.distributed import get_tensor_model_parallel_world_size
from vllm.forward_context import ForwardContext, get_forward_context
@@ -34,6 +33,12 @@ from vllm.utils.torch_utils import direct_register_custom_op
from vllm.v1.attention.backend import AttentionMetadata  # type: ignore

from vllm_ascend.ascend_config import get_ascend_config
from vllm_ascend.utils import vllm_version_is

if vllm_version_is("v0.15.0"):
    from vllm.attention.layer import MLAAttention  # type: ignore
else:
    from vllm.model_executor.layers.attention import MLAAttention


class IndexerWrapper(nn.Module):
@@ -125,6 +130,16 @@ class AscendMultiHeadLatentAttention(MultiHeadLatentAttentionWrapper):
            o_proj=mla_modules.o_proj,
        )

        original_process_weights = self.mla_attn.process_weights_after_loading

        def wrapped_process_weights(act_dtype: torch.dtype):
            from vllm_ascend.attention.sfa_v1 import AscendSFAImpl
            if not isinstance(self.mla_attn.impl, AscendSFAImpl):
                original_process_weights(act_dtype)
            self.mla_attn.impl.process_weights_after_loading(act_dtype)

        self.mla_attn.process_weights_after_loading = wrapped_process_weights

        compilation_config = get_current_vllm_config().compilation_config
        if prefix in compilation_config.static_forward_context:
            raise ValueError(f"Duplicate layer name: {prefix}")
@@ -33,3 +33,4 @@ import vllm_ascend.patch.worker.patch_qwen3_next_mtp # noqa
import vllm_ascend.patch.worker.patch_rejection_sampler # noqa
import vllm_ascend.patch.worker.patch_qwen3_next # noqa
import vllm_ascend.patch.worker.patch_v2_egale # noqa
import vllm_ascend.patch.worker.patch_huanyuan_vl # noqa
vllm_ascend/patch/worker/patch_huanyuan_vl.py (new file, 27 lines)
@@ -0,0 +1,27 @@
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# This file is a part of the vllm-ascend project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#from collections.abc import Iterable

from vllm.transformers_utils.processors.hunyuan_vl import HunYuanVLProcessor

_original_call = HunYuanVLProcessor.__call__


def _patched_call(self, images=None, text=None, videos=None, **kwargs):
    """Remove add_special_tokens requirement."""
    kwargs.pop("add_special_tokens", None)
    return _original_call(self, images=images, text=text, videos=videos, **kwargs)


HunYuanVLProcessor.__call__ = _patched_call
@@ -1,8 +1,12 @@
import torch
import vllm.v1.worker.utils as utils
from vllm.attention.layer import Attention
from vllm.v1.worker.utils import defaultdict, extract_layer_index
from vllm_ascend.utils import vllm_version_is

if vllm_version_is("v0.15.0"):
    from vllm.attention.layer import Attention  # type: ignore
else:
    from vllm.model_executor.layers.attention import Attention

# Without this patch, it will raise an exception when initialize kv_cache.
# TODO To remove the patch, we need check why the original bind_kv_cache raises an NotImplementedError.
@@ -401,7 +401,13 @@ class AscendModelSlimConfig(QuantizationConfig):
        self.packed_modules_mapping = packed_modules_model_mapping[
            model_type]
        prefix = self.quant_prefix_mapper(model_type, prefix)
        from vllm.attention.layer import Attention

        from vllm_ascend.utils import vllm_version_is
        if vllm_version_is("v0.15.0"):
            from vllm.attention.layer import Attention  # type: ignore
        else:
            from vllm.model_executor.layers.attention import Attention

        if prefix.startswith("language_model"):
            prefix = prefix.split('.', 1)[-1]
        if isinstance(layer, LinearBase):
@@ -41,7 +41,7 @@ from vllm_ascend.ops.rotary_embedding import update_cos_sin
from vllm_ascend.ops.triton.spec_decode.utils import \
    prepare_inputs_padded_kernel
from vllm_ascend.ops.triton.triton_utils import get_vectorcore_num
from vllm_ascend.utils import enable_sp, shared_expert_dp_enabled, lmhead_tp_enable
from vllm_ascend.utils import enable_sp, shared_expert_dp_enabled, lmhead_tp_enable, vllm_version_is

# Currently we will fix block size to a small one since `num_reqs` can't be too large
_PREPARE_INPUTS_BLOCK_SIZE = 4
@@ -400,6 +400,12 @@ class EagleProposer(VllmEagleProposer):
                is_draft_model=True,
                draft_attn_metadatas=multi_steps_attn_metadata):

            if not vllm_version_is("v0.15.0"):
                # Reset MOE layer index before first model call
                forward_context = get_forward_context()
                if forward_context is not None:
                    forward_context.moe_layer_index = 0

            self._runnable(
                num_input_tokens=num_tokens,
                batch_size=batch_size,
@@ -559,6 +565,12 @@ class EagleProposer(VllmEagleProposer):
                is_draft_model=True,
                draft_attn_metadatas=multi_steps_attn_metadata):

            if not vllm_version_is("v0.15.0"):
                # Reset MOE layer index for forward pass
                forward_context = get_forward_context()
                if forward_context is not None:
                    forward_context.moe_layer_index = 0

            draft_token_ids = self._runnable(
                num_input_tokens=num_input_tokens,
                batch_size=batch_size,
@@ -660,6 +672,12 @@ class EagleProposer(VllmEagleProposer):
            forward_context.num_accept_tokens = batch_size

        for draft_step in range(self.num_speculative_tokens - 1):
            if not vllm_version_is("v0.15.0"):
                # Reset MOE layer index for each draft step iteration
                forward_context = get_forward_context()
                if forward_context is not None:
                    forward_context.moe_layer_index = 0

            # Update the inputs.
            # cast to int32 is crucial when eagle model is compiled.
            # tensor.argmax() returns int64 by default.
@@ -18,7 +18,7 @@ from vllm_ascend.attention.utils import AscendCommonAttentionMetadata
from vllm_ascend.compilation.acl_graph import ACLGraphWrapper
from vllm_ascend.ops.rotary_embedding import get_cos_and_sin_mla
from vllm_ascend.spec_decode.eagle_proposer import EagleProposer
from vllm_ascend.utils import lmhead_tp_enable
from vllm_ascend.utils import lmhead_tp_enable, vllm_version_is


class MtpProposer(EagleProposer):
@@ -122,6 +122,11 @@ class MtpProposer(EagleProposer):
                batch_descriptor=batch_descriptor,
                is_draft_model=True,
                in_profile_run=is_profile):
            if not vllm_version_is("v0.15.0"):
                # Reset MOE layer index for each MTP step iteration
                forward_context = get_forward_context()
                if forward_context is not None:
                    forward_context.moe_layer_index = 0
            previous_hidden_states, positions = self.maybe_pad_and_reduce(
                previous_hidden_states, positions)
            self.model(input_ids=input_ids,
@@ -330,6 +335,13 @@ class MtpProposer(EagleProposer):
                batch_descriptor=batch_descriptor,
                num_actual_tokens=num_tokens,
                is_draft_model=True):

            if not vllm_version_is("v0.15.0"):
                # Reset MOE layer index for each MTP step to match all_moe_layers registration
                forward_context = get_forward_context()
                if forward_context is not None:
                    forward_context.moe_layer_index = 0

            with record_function_or_nullcontext('mtp_forward'):
                model_kwargs = {}
                model_kwargs["attn_metadata"] = attn_metadata
@@ -30,7 +30,6 @@ import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
from vllm.attention.layer import Attention, MLAAttention
from vllm.compilation.cuda_graph import CUDAGraphStat
from vllm.config import CompilationMode, CUDAGraphMode, VllmConfig, get_layers_from_vllm_config
from vllm.distributed import get_tensor_model_parallel_world_size, tensor_model_parallel_all_gather
@@ -137,6 +136,12 @@ if TYPE_CHECKING:
else:
    xgr = LazyLoader("xgr", globals(), "xgrammar")

from vllm_ascend.utils import vllm_version_is

if vllm_version_is("v0.15.0"):
    from vllm.attention.layer import Attention, MLAAttention  # type: ignore
else:
    from vllm.model_executor.layers.attention import Attention, MLAAttention

# if true, allow tensor initialization and casting with internal format (e.g., NZ)
torch.npu.config.allow_internal_format = True
@@ -2026,6 +2031,7 @@ class NPUModelRunner(GPUModelRunner):
        remove_lora: bool = True,
        activate_lora: bool = False,
        is_graph_capturing: bool = False,
        num_active_loras: int = 0,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        # only support eager mode and piecewise graph now
        assert cudagraph_runtime_mode is None or cudagraph_runtime_mode.valid_runtime_modes()