[Main2Main] Upgrade vLLM to 0226 (#6813)

### What this PR does / why we need it?

Breaking changes in upstream vLLM that this upgrade adapts to:
1. https://github.com/vllm-project/vllm/pull/33452
2. https://github.com/vllm-project/vllm/pull/33451
3. https://github.com/vllm-project/vllm/pull/32567
4. https://github.com/vllm-project/vllm/pull/32344
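
The common thread in the diff below is the removal of `vllm_version_is("v0.15.0")` guards: once the pinned vLLM moves past v0.15.0, the guarded branch is always taken and the check is dead code. A minimal sketch of the pattern being deleted, assuming `get_forward_context` comes from vLLM's `vllm.forward_context` module:

```python
from vllm.forward_context import get_forward_context

from vllm_ascend.utils import vllm_version_is

# Pre-upgrade guard: v0.15.0 predates the moe_layer_index field on the
# forward context, so the reset only ran on newer vLLM versions.
if not vllm_version_is("v0.15.0"):
    forward_context = get_forward_context()
    if forward_context is not None:
        forward_context.moe_layer_index = 0
```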

### Does this PR introduce _any_ user-facing change?

### How was this patch tested?

- vLLM version: v0.15.0
- vLLM main: 83b47f67b1

---------

Signed-off-by: MrZ20 <2609716663@qq.com>
Signed-off-by: gcanlin <canlinguosdu@gmail.com>
Co-authored-by: MrZ20 <2609716663@qq.com>
Commit: e4458b2d2b (parent 80316c5824)
Author: Canlin Guo
Date: 2026-02-27 16:05:21 +08:00
Committed by: GitHub
40 changed files with 117 additions and 184 deletions


@@ -41,7 +41,7 @@ from vllm_ascend.attention.utils import AscendCommonAttentionMetadata
 from vllm_ascend.compilation.acl_graph import ACLGraphWrapper, update_full_graph_params
 from vllm_ascend.ops.triton.spec_decode.utils import prepare_inputs_padded_kernel
 from vllm_ascend.ops.triton.triton_utils import get_vectorcore_num
-from vllm_ascend.utils import enable_sp, lmhead_tp_enable, shared_expert_dp_enabled, vllm_version_is
+from vllm_ascend.utils import enable_sp, lmhead_tp_enable, shared_expert_dp_enabled
 
 # Currently we will fix block size to a small one since `num_reqs` can't be too large
 _PREPARE_INPUTS_BLOCK_SIZE = 4
@@ -357,11 +357,10 @@ class EagleProposer(VllmEagleProposer):
             is_draft_model=True,
             draft_attn_metadatas=multi_steps_attn_metadata,
         ):
-            if not vllm_version_is("v0.15.0"):
-                # Reset MOE layer index before first model call
-                forward_context = get_forward_context()
-                if forward_context is not None:
-                    forward_context.moe_layer_index = 0
+            # Reset MOE layer index before first model call
+            forward_context = get_forward_context()
+            if forward_context is not None:
+                forward_context.moe_layer_index = 0
             self._runnable(
                 num_input_tokens=num_tokens,
@@ -522,11 +521,10 @@ class EagleProposer(VllmEagleProposer):
             is_draft_model=True,
             draft_attn_metadatas=multi_steps_attn_metadata,
         ):
-            if not vllm_version_is("v0.15.0"):
-                # Reset MOE layer index for forward pass
-                forward_context = get_forward_context()
-                if forward_context is not None:
-                    forward_context.moe_layer_index = 0
+            # Reset MOE layer index for forward pass
+            forward_context = get_forward_context()
+            if forward_context is not None:
+                forward_context.moe_layer_index = 0
             draft_token_ids = self._runnable(
                 num_input_tokens=num_input_tokens,
@@ -617,11 +615,10 @@ class EagleProposer(VllmEagleProposer):
         forward_context.num_accept_tokens = batch_size
         for draft_step in range(self.num_speculative_tokens - 1):
-            if not vllm_version_is("v0.15.0"):
-                # Reset MOE layer index for each draft step iteration
-                forward_context = get_forward_context()
-                if forward_context is not None:
-                    forward_context.moe_layer_index = 0
+            # Reset MOE layer index for each draft step iteration
+            forward_context = get_forward_context()
+            if forward_context is not None:
+                forward_context.moe_layer_index = 0
             # Update the inputs.
             # cast to int32 is crucial when eagle model is compiled.
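
The loop hunk above also shows why the reset lives inside the draft loop rather than before it: each draft step replays the full model, and MOE layers key per-layer state off a counter carried on the forward context, so the counter must restart at zero before every pass. A toy, self-contained sketch of that invariant; `SimpleNamespace` stands in for the real forward context, and all names and counts are illustrative:

```python
from types import SimpleNamespace

# Toy stand-in for the forward context; the real object lives in vLLM
# and carries many more fields than this.
forward_context = SimpleNamespace(moe_layer_index=0)

NUM_MOE_LAYERS = 4  # illustrative

def run_moe_layer() -> int:
    # Each MOE layer consumes the current index and advances it,
    # mimicking per-layer registration during a forward pass.
    idx = forward_context.moe_layer_index
    forward_context.moe_layer_index += 1
    return idx

num_speculative_tokens = 3  # illustrative
for draft_step in range(num_speculative_tokens - 1):
    # Without this reset, the second draft step would see indices
    # starting at NUM_MOE_LAYERS and mis-key per-layer state.
    forward_context.moe_layer_index = 0
    indices = [run_moe_layer() for _ in range(NUM_MOE_LAYERS)]
    assert indices == list(range(NUM_MOE_LAYERS))
```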


@@ -16,7 +16,7 @@ from vllm_ascend.attention.utils import AscendCommonAttentionMetadata
 from vllm_ascend.compilation.acl_graph import ACLGraphWrapper
 from vllm_ascend.ops.rotary_embedding import get_cos_and_sin_mla, update_cos_sin
 from vllm_ascend.spec_decode.eagle_proposer import EagleProposer
-from vllm_ascend.utils import lmhead_tp_enable, vllm_version_is
+from vllm_ascend.utils import lmhead_tp_enable
 
 
 class MtpProposer(EagleProposer):
@@ -130,11 +130,10 @@ class MtpProposer(EagleProposer):
             is_draft_model=True,
             in_profile_run=is_profile,
         ):
-            if not vllm_version_is("v0.15.0"):
-                # Reset MOE layer index for each MTP step iteration
-                forward_context = get_forward_context()
-                if forward_context is not None:
-                    forward_context.moe_layer_index = 0
+            # Reset MOE layer index for each MTP step iteration
+            forward_context = get_forward_context()
+            if forward_context is not None:
+                forward_context.moe_layer_index = 0
             previous_hidden_states, positions = self.maybe_pad_and_reduce(previous_hidden_states, positions)
             self.model(input_ids=input_ids, positions=positions, hidden_states=previous_hidden_states)
             forward_context = get_forward_context()
@@ -341,11 +340,10 @@ class MtpProposer(EagleProposer):
             num_actual_tokens=num_tokens,
             is_draft_model=True,
         ):
-            if not vllm_version_is("v0.15.0"):
-                # Reset MOE layer index for each MTP step to match all_moe_layers registration
-                forward_context = get_forward_context()
-                if forward_context is not None:
-                    forward_context.moe_layer_index = 0
+            # Reset MOE layer index for each MTP step to match all_moe_layers registration
+            forward_context = get_forward_context()
+            if forward_context is not None:
+                forward_context.moe_layer_index = 0
             with record_function_or_nullcontext("mtp_forward"):
                 model_kwargs = {}
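
For reference, the helper these files stop importing is a plain version check. A hedged sketch of the idea, assuming the real `vllm_ascend.utils.vllm_version_is` honors a `VLLM_VERSION` environment override for source builds and otherwise compares against the installed `vllm.__version__`:

```python
import os

from packaging.version import Version

def vllm_version_is(target: str) -> bool:
    # Sketch only: the env-override behavior is an assumption about the
    # real helper in vllm_ascend.utils, not a confirmed detail.
    version = os.getenv("VLLM_VERSION")
    if version is None:
        import vllm
        version = vllm.__version__
    return Version(version) == Version(target)
```

With the minimum supported vLLM now past v0.15.0, `vllm_version_is("v0.15.0")` always returns `False`, which is exactly why the `if not vllm_version_is(...)` branches above collapse to their bodies.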