# SPDX-License-Identifier: Apache-2.0

import copy
from collections.abc import Callable
from contextlib import AbstractContextManager, contextmanager, nullcontext
from typing import Any

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from vllm.config import CompilationMode, CUDAGraphMode, VllmConfig, get_layers_from_vllm_config
from vllm.distributed.parallel_state import (
    get_pcp_group,
    get_pp_group,
    get_tp_group,
    get_world_group,
    init_model_parallel_group,
    patch_tensor_parallel_group,
)
from vllm.forward_context import BatchDescriptor, get_forward_context
from vllm.logger import logger
from vllm.model_executor.layers.attention_layer_base import AttentionLayerBase
from vllm.model_executor.model_loader import get_model
from vllm.model_executor.models import supports_multimodal
from vllm.model_executor.models.deepseek_v2 import DeepseekV32IndexerCache
from vllm.model_executor.models.llama_eagle3 import Eagle3LlamaForCausalLM
from vllm.triton_utils import HAS_TRITON, triton
from vllm.utils.math_utils import cdiv
from vllm.utils.platform_utils import is_pin_memory_available
from vllm.v1.attention.backends.utils import CommonAttentionMetadata
from vllm.v1.core.sched.output import SchedulerOutput
from vllm.v1.sample.metadata import SamplingMetadata
from vllm.v1.spec_decode.eagle import EagleProposer
from vllm.v1.spec_decode.metadata import SpecDecodeMetadata
from vllm.v1.spec_decode.utils import (
    PADDING_SLOT_ID,
    compute_new_slot_mapping,
    extend_all_queries_by_N,
)
from vllm.v1.worker.gpu_input_batch import CachedRequestState, InputBatch

from vllm_ascend.ascend_forward_context import _EXTRA_CTX, set_ascend_forward_context
from vllm_ascend.attention.attention_mask import AttentionMaskBuilder
from vllm_ascend.attention.attention_v1 import AscendAttentionState
from vllm_ascend.attention.utils import AscendCommonAttentionMetadata
from vllm_ascend.compilation.acl_graph import ACLGraphWrapper, update_full_graph_params
from vllm_ascend.ops.triton.spec_decode.utils import prepare_inputs_padded_kernel
from vllm_ascend.ops.triton.triton_utils import get_vectorcore_num
from vllm_ascend.utils import enable_sp, lmhead_tp_enable, shared_expert_dp_enabled

# Currently we fix the block size to a small value since `num_reqs` can't be too large
_PREPARE_INPUTS_BLOCK_SIZE = 4


# TODO: Remove this once the fx-graph bug is fixed
# patch vllm_config to be in CompilationMode.NONE temporarily
@contextmanager
def _maybe_eager_context(vllm_config):
    raw_compilation_config_mode = vllm_config.compilation_config.mode
    vllm_config.compilation_config.mode = CompilationMode.NONE
    try:
        yield
    finally:
        vllm_config.compilation_config.mode = raw_compilation_config_mode


# split hidden states along the sequence dimension
def split_inputs_tp_to_sp(hidden_states, out):
    # tp and sp share the same group
    group = get_tp_group()

    world_size = group.world_size
    rank = group.rank

    num_tokens = hidden_states.shape[0]
    # the padded number of tokens per rank
    padded_num_tokens_per_rank = (num_tokens + world_size - 1) // world_size
    # compute the start and end of the slice
    start = padded_num_tokens_per_rank * rank
    end = padded_num_tokens_per_rank * (rank + 1)

    # copy only the hidden_states belonging to the current rank
    hidden_states_curr_rank = hidden_states[start:end]
    out[: hidden_states_curr_rank.shape[0]] = hidden_states_curr_rank
    return out[:padded_num_tokens_per_rank]


class SpecDecodeBaseProposer(EagleProposer):
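    """Base proposer for speculative decoding (EAGLE / MTP style) on Ascend NPUs.

    The forward passes of all draft steps are merged into a single callable
    (``_run_merged_draft``) so that, in full-graph mode, every speculative step
    can be captured and replayed as one ACL graph, avoiding per-step
    synchronization between the target-model graph and the draft-model graph.
    """

    # Either the eager merged-draft callable or its ACLGraphWrapper; the wrapper
    # is installed during model loading when full graphs are enabled.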
    _runnable: ACLGraphWrapper | Callable

    def __init__(self, vllm_config: VllmConfig, device: torch.device, pass_hidden_states_to_model: bool, runner=None):
        super().__init__(vllm_config, device, runner)

        self.use_async_scheduling = self.vllm_config.scheduler_config.async_scheduling
        self.pass_hidden_states_to_model = pass_hidden_states_to_model
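        # Maximum query length per request that still counts as a decode:
        # the bonus token plus num_speculative_tokens draft tokens.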
        self.decode_threshold = 1 + self.num_speculative_tokens
        self.query_start_loc = self.runner._make_buffer(self.runner.max_num_reqs + 2, dtype=torch.int32)
        self.arange_cpu = torch.arange(self.arange.shape[0], device="cpu", dtype=torch.int32)
        self.attn_mask_builder = AttentionMaskBuilder(self.device)

        self.enable_shared_expert_dp = shared_expert_dp_enabled()

        self.pcp_size = self.runner.pcp_size
        self.dcp_size = self.runner.dcp_size
        self.pcp_rank = self.runner.pcp_rank
        self.dcp_rank = self.runner.dcp_rank

        self.full_indices = range(
            self.runner.max_num_tokens * self.pcp_size * self.dcp_size
            + self.pcp_size * self.dcp_size * self.runner.max_num_reqs
        )

        self.use_sparse = hasattr(vllm_config.model_config.hf_text_config, "index_topk")

        # NOTE:
        # `draft_tensor_parallel_size` does not take effect for Eagle:
        # the draft model uses the same TP size as the target model in practice.
        # So we apply this patch to set the draft model's TP size to 1 separately.
        # Due to the verification in vllm's `_verify_and_get_draft_tp`,
        # the value of `draft_tensor_parallel_size` here will either be 1
        # or the same as the target model's TP size.
        # TODO(zhaomingyu13): If we want to adapt to the case where the draft model's TP
        # is not 1 and differs from the target model's, this part should be rewritten.
        if vllm_config.parallel_config.tensor_parallel_size != self.speculative_config.draft_tensor_parallel_size:
            tp_group = init_model_parallel_group(
                [[get_world_group().rank]],
                get_world_group().rank,
                torch.distributed.get_backend(get_world_group().device_group),
                use_message_queue_broadcaster=True,
                group_name="tp",
            )
            self.tp_group_context = patch_tensor_parallel_group(tp_group)
        else:
            self.tp_group_context = nullcontext()

        self.use_cuda_graph = self.runner._use_aclgraph() and not self.speculative_config.enforce_eager
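        # For MTP, full-graph drafting additionally requires padded drafter
        # batches and synchronous (non-async) scheduling.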
        if self.method == "mtp":
            self.use_cuda_graph = (
                self.use_cuda_graph
                and not self.use_async_scheduling
                and not self.speculative_config.disable_padded_drafter_batch
            )

        # TODO: Remove this once the fx-graph bug is fixed
        self.maybe_eager_context: AbstractContextManager[Any] = nullcontext()
        if not self.use_cuda_graph and enable_sp(vllm_config):
            self.maybe_eager_context = _maybe_eager_context(vllm_config)

        self.token_indices_to_sample = torch.zeros(
            self.vllm_config.scheduler_config.max_num_batched_tokens, dtype=torch.int32, device=device
        )
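
        # One slot_mapping buffer per speculative step so that the attention
        # metadata for every draft step can be prepared before a single graph replay.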
        slot_mapping_lens = self.runner.max_num_tokens + 2 * self.pcp_size * self.runner.max_num_reqs
        self.slot_mapping_group = [
            torch.zeros(slot_mapping_lens, dtype=torch.int32, device=device, pin_memory=self.runner.pin_memory)
            for _ in range(self.num_speculative_tokens)
        ]
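
        # Run the merged draft eagerly by default; this may be swapped for an
        # ACLGraphWrapper during model loading when full graphs are enabled.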
        self._runnable = self._run_merged_draft
        self.is_multimodal_model = self.vllm_config.model_config.is_multimodal_model
        if self.uses_mrope:
            self.mrope_positions = torch.zeros((3, self.max_num_tokens + 1), dtype=torch.int32, device=device)
        elif self.uses_xdrope_dim > 0 and self.draft_uses_xdrope_dim > 0:
            self.xdrope_positions = torch.zeros(
                (self.uses_xdrope_dim, self.max_num_tokens + 1),
                dtype=torch.int32,
                device=device,
            )
        else:
            # RoPE needs shape (max_num_tokens,)
            self.positions = torch.zeros(self.max_num_tokens, dtype=torch.int32, device=device)

        self.token_arange_np = np.arange(self.max_num_tokens + 1)

    def _get_model(self) -> nn.Module:
        """
        Default method to call get_model(). Can be overridden by subclasses which
        need to customize model loading.
        """
        from vllm.compilation.backends import set_model_tag

        with set_model_tag("eagle_head"):
            model = get_model(
                vllm_config=self.vllm_config,
                model_config=self.vllm_config.speculative_config.draft_model_config,
            )
        return model

    def load_model(self, model: nn.Module) -> None:
        target_attn_layer_names = set(get_layers_from_vllm_config(self.vllm_config, AttentionLayerBase).keys())

        with self.maybe_eager_context:
            self.model = self._get_model()

        # Find draft layers (attention layers added by the draft model)
        all_attn_layers = get_layers_from_vllm_config(
            self.vllm_config,
            AttentionLayerBase,  # type: ignore[type-abstract]
        )
        all_indexer_layer_names = set(get_layers_from_vllm_config(self.vllm_config, DeepseekV32IndexerCache).keys())
        self._draft_attn_layer_names = set(all_attn_layers.keys()) - target_attn_layer_names - all_indexer_layer_names

        self.attn_layer_names = list(sorted(self._draft_attn_layer_names))
        draft_attn_layers_dict = get_layers_from_vllm_config(self.vllm_config, AttentionLayerBase)
        self.kernel_block_size = (
            draft_attn_layers_dict[self.attn_layer_names[0]].get_attn_backend().get_supported_kernel_block_sizes()[0]
        )
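
        # One list of draft attention layer names per speculative step; the
        # per-step attn_metadata dicts are keyed by these layer names.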
        self.piece_all_attn_layer_name = []
        for _ in range(self.num_speculative_tokens):
            self.piece_all_attn_layer_name.append(list(self.attn_layer_names))

        if supports_multimodal(model):
            # handle multimodality
            if self.get_model_name(model) in [
                "Qwen2_5_VLForConditionalGeneration",
                "Qwen3VLForConditionalGeneration",
                "Qwen3VLMoeForConditionalGeneration",
                "Qwen3_5ForConditionalGeneration",
                "Qwen3_5MoeForConditionalGeneration",
            ]:
                self.model.config.image_token_index = model.config.image_token_id
            elif self.get_model_name(model) == "PixtralForConditionalGeneration":
                self.model.config.image_token_index = model.config.vision_config.image_token_id
            elif self.get_model_name(model) == "KimiK25ForConditionalGeneration":
                self.model.config.image_token_index = model.config.media_placeholder_token_id
            else:
                self.model.config.image_token_index = model.config.image_token_index
            target_language_model = model.get_language_model()
        else:
            target_language_model = model

        # share embed_tokens with the target model if needed
        self._maybe_share_embeddings(target_language_model)
        self._maybe_share_lm_head(model)

        if self.parallel_drafting and self.pass_hidden_states_to_model:
            assert self.parallel_drafting_hidden_state_tensor is not None
            self.parallel_drafting_hidden_state_tensor.copy_(
                self.model.combine_hidden_states(self.model.mask_hidden.view(3 * self.hidden_size))
                if self.eagle3_use_aux_hidden_state
                else self.model.mask_hidden.view(self.hidden_size)
            )

    def _maybe_share_embeddings(self, target_language_model: nn.Module) -> None:
        """
        Some draft models may not have their own embedding layers, and some may
        have a duplicate copy of the target model's embedding layers. In these cases,
        we share the target model's embedding layers with the draft model to save
        memory.
        """
        if get_pp_group().world_size == 1:
            if hasattr(target_language_model.model, "embed_tokens"):
                target_embed_tokens = target_language_model.model.embed_tokens
            elif hasattr(target_language_model.model, "embedding"):
                target_embed_tokens = target_language_model.model.embedding
            else:
                raise AttributeError("Target model does not have 'embed_tokens' or 'embedding' attribute")
            # If pp > 1, the weights of the MTP head and the main model's embedding are not on the same device.
            # Check whether the draft model can reuse the main model's embedding and LM head.
            share_embeddings = False
            if hasattr(self.model, "has_own_embed_tokens"):
                # EAGLE model
                if not self.model.has_own_embed_tokens:
                    share_embeddings = True
                    logger.info(
                        "Detected EAGLE model without its own embed_tokens in the"
                        " checkpoint. Sharing target model embedding weights with the"
                        " draft model."
                    )
                elif (
                    isinstance(target_embed_tokens.weight, torch.Tensor)
                    and isinstance(self.model.model.embed_tokens.weight, torch.Tensor)
                    # TODO: Offload to CPU for comparison to avoid extra NPU memory
                    # usage in CI testing environments with limited NPU memory
                    and torch.equal(
                        target_embed_tokens.weight.cpu(),
                        self.model.model.embed_tokens.weight.cpu(),
                    )
                ):
                    share_embeddings = True
                    logger.info(
                        "Detected EAGLE model with embed_tokens identical to the target"
                        " model. Sharing target model embedding weights with the draft"
                        " model."
                    )
                else:
                    logger.info(
                        "Detected EAGLE model with distinct embed_tokens weights. "
                        "Keeping separate embedding weights from the target model."
                    )
            else:
                # MTP model
                share_embeddings = True
                logger.info("Detected MTP model. Sharing target model embedding weights with the draft model.")

            if share_embeddings:
                if hasattr(self.model.model, "embed_tokens"):
                    del self.model.model.embed_tokens
                self.model.model.embed_tokens = target_embed_tokens
        else:
            logger.info(
                "Since PP > 1 (or for other reasons), the draft model head loaded its own vocab embedding"
                " weights instead of sharing them with the target model."
            )

    # share lm_head with the target model if needed
    def _maybe_share_lm_head(self, model: nn.Module) -> None:
        # some model definitions do not define lm_head explicitly
        # and reuse embed_tokens for lm_head, e.g., CohereForCausalLM
        if self.method == "eagle" and hasattr(model, "lm_head"):
            logger.info("Loading EAGLE LM head weights from the target model.")
            if supports_multimodal(model):
                self.model.lm_head = model.get_language_model().lm_head
            else:
                self.model.lm_head = model.lm_head
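
        # For DeepSeek MLA MTP, reuse the target model's lm_head when the
        # draft layer's shared head weights are identical to it.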
        if self.method == "mtp" and self.vllm_config.model_config.is_deepseek_mla:
            for _, layer_module in self.model.model.layers.items():
                if torch.equal(layer_module.shared_head.head.weight, model.lm_head.weight):
                    layer_module.shared_head.head = model.lm_head
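
        # Full-graph capture: for MTP the whole draft model is wrapped, while for
        # EAGLE-style drafting the merged multi-step callable (_run_merged_draft)
        # is wrapped so that every speculative step replays inside one ACL graph.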
        if self.vllm_config.compilation_config.cudagraph_mode.has_full_cudagraphs() and self.use_cuda_graph:
            self.update_stream = torch.npu.Stream()
            if self.method == "mtp":
                self.model = ACLGraphWrapper(self.model, self.vllm_config, runtime_mode=CUDAGraphMode.FULL)
            else:
                self._runnable = ACLGraphWrapper(
                    self._run_merged_draft, self.vllm_config, runtime_mode=CUDAGraphMode.FULL
                )

    def get_model(self) -> nn.Module:
        # get raw model out of the aclgraph wrapper.
        if isinstance(self.model, ACLGraphWrapper):
            return self.model.unwrap()
        return self.model

    def shallow_copy_metadata(self, attn_metadata):
        # Currently, new objects will be assigned to the lists in attn_metadata
        # when updated, so a shallow copy is sufficient.
        return copy.copy(attn_metadata)

    @torch.inference_mode()
    def dummy_run(
        self,
        num_tokens: int,
        with_prefill: bool = False,
        in_graph_capturing: bool = False,
        num_reqs: int = 0,
        num_tokens_across_dp: torch.Tensor | None = None,
        aclgraph_runtime_mode: CUDAGraphMode = CUDAGraphMode.NONE,
        batch_descriptor=None,
        dummy_compute_logits=lambda hidden_states: None,
        is_profile=False,
    ):
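        """Run a dummy forward pass of the draft model.

        Used for profiling, warmup, and ACL graph capture; when the runtime mode
        is FULL, attention metadata for every speculative step is pre-built here.
        """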
        (
            num_tokens,
            num_tokens_across_dp,
            _,
        ) = self.runner._sync_metadata_across_dp(num_tokens, is_draft_model=True)
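
        # Build one attn_metadata per speculative step up front so that the merged
        # multi-step draft graph can be captured (and later replayed) in one pass.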
        multi_steps_attn_metadata = []
        if not self.use_cuda_graph:
            aclgraph_runtime_mode = CUDAGraphMode.NONE
        if aclgraph_runtime_mode == CUDAGraphMode.FULL and len(self.runner.attn_groups) > 0:
            num_computed_tokens_cpu = self.runner.input_batch.num_computed_tokens_cpu_tensor[:num_reqs]

            # num_reqs is already the padded version
            self.query_start_loc.cpu[: num_reqs + 1].copy_(self.runner.query_start_loc.cpu[: num_reqs + 1])
            self.query_start_loc.copy_to_gpu()

            common_attn_metadata = AscendCommonAttentionMetadata(
                query_start_loc=self.query_start_loc.gpu[: num_reqs + 1],
                query_start_loc_cpu=self.query_start_loc.cpu[: num_reqs + 1],
                seq_lens_cpu=self.runner.seq_lens.cpu,
                seq_lens=self.runner.seq_lens.gpu[:num_reqs],
                num_reqs=num_reqs,
                num_actual_tokens=num_tokens,
                num_input_tokens=num_tokens,
                max_query_len=self.num_speculative_tokens + 1,
                num_computed_tokens_cpu=num_computed_tokens_cpu,
                actual_seq_lengths_q=self.runner.actual_seq_lengths_q,
                block_table_tensor=self.runner.input_batch.block_table[0].get_device_tensor()[:num_reqs],
                # This is a placeholder; the real slot_mapping is assigned per step below.
                slot_mapping=self.runner.input_batch.block_table[0].slot_mapping.gpu,
                positions=self.runner.positions.gpu,
                attn_state=self.runner.attn_state,
                decode_token_per_req=self.runner.decode_token_per_req,
                max_seq_len=0,
            )
            if self.pcp_size * self.dcp_size > 1:
                # update long_seq related params and flatten block_table
                common_attn_metadata.prefill_context_parallel_metadata = self.runner.pcp_manager.long_seq_metadata
                common_attn_metadata.block_table_tensor = self.runner.input_batch.block_table[0].get_device_tensor()[
                    : num_reqs * self.decode_threshold
                ]
|
2025-09-04 11:34:47 +08:00
|
|
|
|
2026-03-23 15:39:24 +08:00
|
|
|
assert len(self.draft_attn_groups) > 0
|
|
|
|
|
builder = self.draft_attn_groups[0].get_metadata_builder()
|
[Feat] Merge the multi eagle graphs to one graph (#5940)
### What this PR does / why we need it?
This PR merge all steps of draft model in fullgraph mode, to avoid the
synchronize between each graph, reduce the bubble time.
#### Key ideas:
- The "model forward" of the step 0 (first step) and remaining steps are
captured together as a "Callable", rather than capturing each model
individually.
- "update_attn_params" is moved outside the entire graph, meaning that
all "attn_metadata" required by all steps are constructed before
"replay", and the "attn_params" of all steps are updated at once.
- Remove synchronization between the main model graph and draft model
graph.
#### Key params/functions:
- params: draft_attn_metadatas, attn_metadata_multi_steps,
slot_mapping_group
- functions: _run_merged_draft, attn_update_stack_num_spec_norm,
update_attn_params, _propose, dummy_run
### Does this PR introduce _any_ user-facing change?
### How was this patch tested?
- vLLM version: v0.13.0
- vLLM main:
https://github.com/vllm-project/vllm/commit/11b6af5280d6d6dfb8953af16e67b25f819b3be9
Signed-off-by: anon189Ty <Stari_Falcon@outlook.com>
2026-01-23 08:37:02 +08:00
|
|
|
# update the tensor's address for each step.
|
|
|
|
|
for draft_step in range(self.num_speculative_tokens):
|
2026-02-07 09:16:07 +08:00
|
|
|
common_attn_metadata = self.shallow_copy_metadata(common_attn_metadata)
|
[Feat] Merge the multi eagle graphs to one graph (#5940)
### What this PR does / why we need it?
This PR merge all steps of draft model in fullgraph mode, to avoid the
synchronize between each graph, reduce the bubble time.
#### Key ideas:
- The "model forward" of the step 0 (first step) and remaining steps are
captured together as a "Callable", rather than capturing each model
individually.
- "update_attn_params" is moved outside the entire graph, meaning that
all "attn_metadata" required by all steps are constructed before
"replay", and the "attn_params" of all steps are updated at once.
- Remove synchronization between the main model graph and draft model
graph.
#### Key params/functions:
- params: draft_attn_metadatas, attn_metadata_multi_steps,
slot_mapping_group
- functions: _run_merged_draft, attn_update_stack_num_spec_norm,
update_attn_params, _propose, dummy_run
### Does this PR introduce _any_ user-facing change?
### How was this patch tested?
- vLLM version: v0.13.0
- vLLM main:
https://github.com/vllm-project/vllm/commit/11b6af5280d6d6dfb8953af16e67b25f819b3be9
Signed-off-by: anon189Ty <Stari_Falcon@outlook.com>
2026-01-23 08:37:02 +08:00
|
|
|
# Set the real slot_mapping.
|
|
|
|
|
common_attn_metadata.slot_mapping = self.slot_mapping_group[draft_step]
|
|
|
|
|
attn_metadata_eagle = builder.build_for_graph_capture(
|
2026-03-06 17:11:22 +08:00
|
|
|
common_attn_metadata,
|
|
|
|
|
AscendAttentionState.SpecDecoding if self.method == "mtp" else AscendAttentionState.ChunkedPrefill,
|
2026-02-07 09:16:07 +08:00
|
|
|
)
|
[Feat] Merge the multi eagle graphs to one graph (#5940)
### What this PR does / why we need it?
This PR merge all steps of draft model in fullgraph mode, to avoid the
synchronize between each graph, reduce the bubble time.
#### Key ideas:
- The "model forward" of the step 0 (first step) and remaining steps are
captured together as a "Callable", rather than capturing each model
individually.
- "update_attn_params" is moved outside the entire graph, meaning that
all "attn_metadata" required by all steps are constructed before
"replay", and the "attn_params" of all steps are updated at once.
- Remove synchronization between the main model graph and draft model
graph.
#### Key params/functions:
- params: draft_attn_metadatas, attn_metadata_multi_steps,
slot_mapping_group
- functions: _run_merged_draft, attn_update_stack_num_spec_norm,
update_attn_params, _propose, dummy_run
### Does this PR introduce _any_ user-facing change?
### How was this patch tested?
- vLLM version: v0.13.0
- vLLM main:
https://github.com/vllm-project/vllm/commit/11b6af5280d6d6dfb8953af16e67b25f819b3be9
Signed-off-by: anon189Ty <Stari_Falcon@outlook.com>
2026-01-23 08:37:02 +08:00
|
|
|
per_layer_attn_metadata = dict()
|
|
|
|
|
for layer_name in self.attn_layer_names:
|
|
|
|
|
per_layer_attn_metadata[layer_name] = attn_metadata_eagle
|
|
|
|
|
multi_steps_attn_metadata.append(per_layer_attn_metadata)
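        # `multi_steps_attn_metadata` now holds one entry per speculative step; each entry
        # maps every draft attention layer to the metadata built for that step, so the
        # merged graph can be captured without rebuilding metadata between steps.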

        model_positions = self._get_positions(num_tokens)
        batch_size = max(
            num_tokens // (self.num_speculative_tokens + 1), 1
        )  # if not is_profile else self.runner.max_num_reqs
        if is_profile:
            batch_size = min(batch_size, self.runner.max_num_reqs)
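        # Rough request count for the dummy run: each request is assumed to contribute
        # `num_speculative_tokens + 1` tokens, and profile runs are additionally capped
        # at `max_num_reqs`.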

        if self.supports_mm_inputs:
            mm_embeds, is_mm_embed = (None, None)
            inputs_embeds = self.model.embed_input_ids(
                self.input_ids[:num_tokens], multimodal_embeddings=mm_embeds, is_multimodal=is_mm_embed
            )
            self.inputs_embeds[:num_tokens] = inputs_embeds
            inputs_embeds = self.inputs_embeds[:num_tokens]
        else:
            inputs_embeds = None

        with set_ascend_forward_context(
            multi_steps_attn_metadata[0] if multi_steps_attn_metadata else None,
            self.vllm_config,
            num_tokens=num_tokens,
            num_tokens_across_dp=num_tokens_across_dp,
            num_actual_tokens=0,
            in_profile_run=is_profile,
            batch_descriptor=batch_descriptor,
            aclgraph_runtime_mode=aclgraph_runtime_mode,
            is_draft_model=True,
            draft_attn_metadatas=multi_steps_attn_metadata,
        ):
            # Reset MOE layer index before the first model call.
            forward_context = get_forward_context()
            if forward_context is not None:
                forward_context.moe_layer_index = 0

            self._runnable(
                num_input_tokens=num_tokens,
                batch_size=batch_size,
                token_indices_to_sample=self.token_indices_to_sample[: batch_size * self.extra_slots_per_request],
                # target_positions shares its address with model_positions.
                target_positions=model_positions,
                inputs_embeds=inputs_embeds,
                multi_steps_attn_metadata=multi_steps_attn_metadata,
                num_tokens=num_tokens,
            )
            forward_context = get_forward_context()
            if forward_context.cudagraph_runtime_mode == CUDAGraphMode.FULL and not _EXTRA_CTX.capturing:
                self._update_full_graph_params(forward_context, num_tokens, multi_steps_attn_metadata)

    def _propose(
        self,
        # [num_tokens]
        target_token_ids: torch.Tensor,
        # [num_tokens] or [3, num_tokens] when M-RoPE is enabled
        target_positions: torch.Tensor,
        # [num_tokens, hidden_size]
        target_hidden_states: torch.Tensor,
        # [batch_size]
        next_token_ids: torch.Tensor,
        token_indices_to_sample: torch.Tensor | None,
        common_attn_metadata: CommonAttentionMetadata,
        target_model_batch_desc: BatchDescriptor,
        sampling_metadata: SamplingMetadata,
        mm_embed_inputs: tuple[list[torch.Tensor], torch.Tensor] | None = None,
        req_scheduled_tokens=None,
        long_seq_metadata=None,
        num_prefill_reqs=0,
        num_decode_reqs=0,
        scheduler_output: SchedulerOutput | None = None,
        num_scheduled_tokens: int = 0,
        num_rejected_tokens_gpu: torch.Tensor | None = None,
    ) -> torch.Tensor:
        batch_size = common_attn_metadata.batch_size()

        if token_indices_to_sample is None:
            token_indices_to_sample = common_attn_metadata.query_start_loc[1:] - 1
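            # By default, sample the draft from the last scheduled token of each request;
            # `query_start_loc[1:] - 1` is that token's flattened index.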

        if self.method == "eagle3":
            assert isinstance(self.get_model(), Eagle3LlamaForCausalLM)
            target_hidden_states = self.model.combine_hidden_states(target_hidden_states)
            assert target_hidden_states.shape[-1] == self.hidden_size

        num_tokens, token_indices_to_sample, common_attn_metadata, long_seq_args = self.set_inputs_first_pass(
            target_token_ids=target_token_ids,
            next_token_ids=next_token_ids,
            target_positions=target_positions,
            target_hidden_states=target_hidden_states,
            token_indices_to_sample=token_indices_to_sample,
            cad=common_attn_metadata,
            num_rejected_tokens_gpu=num_rejected_tokens_gpu,
            req_scheduled_tokens=req_scheduled_tokens,
            long_seq_metadata=long_seq_metadata,
            num_prefill_reqs=num_prefill_reqs,
            num_decode_reqs=num_decode_reqs,
        )
        if self.pcp_size * self.dcp_size > 1:
            assert long_seq_args is not None
            query_lens_d, ori_token_indices_to_sample = long_seq_args

        assert self.runner is not None
        if self.use_cuda_graph and num_tokens <= self.runner.cudagraph_batch_sizes[-1]:
            num_input_tokens = self.runner.cudagraph_dispatcher._bs_to_padded_graph_size[num_tokens]
        else:
            num_input_tokens = num_tokens

        (
            num_input_tokens,
            num_tokens_across_dp,
            _,
        ) = self.runner._sync_metadata_across_dp(num_input_tokens, is_draft_model=True)

        has_lora = len(self.runner.input_batch.lora_id_to_lora_request) > 0
        if self.use_cuda_graph:
            aclgraph_runtime_mode, batch_descriptor = self.runner.cudagraph_dispatcher.dispatch(
                num_tokens=num_input_tokens, uniform_decode=target_model_batch_desc.uniform, has_lora=has_lora
            )
        else:
            aclgraph_runtime_mode = CUDAGraphMode.NONE
            batch_descriptor = None

        if aclgraph_runtime_mode == CUDAGraphMode.FULL:
            # TODO: Due to the inconsistency between the proposer dispatcher and the model runner,
            # this padding should have been done in the model runner, but is not. For example, at
            # the prefill stage the target model currently runs in eager mode, so
            # `_pad_query_start_loc_for_fia` is not called, while the draft model runs in graph
            # mode, which means we should pad the `query_start_loc` here. Needs to be fixed in the future.
            num_reqs_padded = self.runner._pad_query_start_loc_for_fia(
                num_input_tokens, common_attn_metadata.num_reqs, common_attn_metadata.num_reqs
            )
            common_attn_metadata.num_reqs = num_reqs_padded
            common_attn_metadata.query_start_loc = self.runner.query_start_loc.gpu[: num_reqs_padded + 1]
            common_attn_metadata.query_start_loc_cpu = self.runner.query_start_loc.cpu[: num_reqs_padded + 1]
            common_attn_metadata.block_table_tensor = self._pad_tensor(
                common_attn_metadata.block_table_tensor, num_reqs_padded
            )
            common_attn_metadata.seq_lens = self.runner.seq_lens.gpu[:num_reqs_padded]
            common_attn_metadata.seq_lens_cpu = self.runner.seq_lens.cpu[:num_reqs_padded]

        if self.supports_mm_inputs:
            mm_embeds, is_mm_embed = mm_embed_inputs or (None, None)
            inputs_embeds = self.model.embed_input_ids(
                self.input_ids[:num_tokens], multimodal_embeddings=mm_embeds, is_multimodal=is_mm_embed
            )
            self.inputs_embeds[:num_tokens] = inputs_embeds
            inputs_embeds = self.inputs_embeds[:num_input_tokens]
        else:
            inputs_embeds = None

        # Update slot_mapping for the different speculative steps.
        # NOTE: Currently we only remake the slot_mapping, because it is the
        # only tensor used by the current FIA.
        # Strictly speaking, `query_start_loc` and `seq_lens` should also have
        # their memory allocated separately for each step, just like `slot_mapping`.
        slot_mapping_lens = common_attn_metadata.slot_mapping.shape[0]
        self.slot_mapping_group[0][:slot_mapping_lens].copy_(common_attn_metadata.slot_mapping[:slot_mapping_lens])
        self.slot_mapping_group[0][slot_mapping_lens:].fill_(-1)
        common_attn_metadata.slot_mapping = self.slot_mapping_group[0]
        common_attn_metadata.num_input_tokens = num_input_tokens
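        # `slot_mapping_group` provides a pre-allocated slot_mapping buffer per speculative
        # step; copying into buffer 0 (and filling the tail with -1) keeps the tensor
        # address stable for the captured graph to reuse on replay.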
        # FIXME(woosuk): The below two ops cause synchronization. Optimize.
        assert len(self.draft_attn_groups) > 0
        builder = self.draft_attn_groups[0].get_metadata_builder()
        attn_metadata = builder.build(0, common_attn_metadata, self.runner.get_model())

        if self.uses_mrope:
            used_update_positions = self.mrope_positions[:, token_indices_to_sample]
        else:
            used_update_positions = self.positions[token_indices_to_sample]

        per_layer_attn_metadata = dict()
        # The first step of speculation.
        for layer_name in self.attn_layer_names:
            per_layer_attn_metadata[layer_name] = attn_metadata
        multi_steps_attn_metadata = [per_layer_attn_metadata]

        # Copy the old attn_metadata and update
        attn_metadata_i = per_layer_attn_metadata[self.attn_layer_names[0]]

        # Clone the data so that, when computing the data at position 2 and position 3
        # in the merged graph, it does not affect position 1.
        # FIXME(lilinsiman)
        common_attn_metadata.block_table_tensor = common_attn_metadata.block_table_tensor.clone()

        if self.pcp_size * self.dcp_size > 1:
            if self.num_speculative_tokens > 1 and not attn_metadata_i.num_prefills:
                # For pcp/dcp, tokens are split across different cp ranks,
                # so we cannot simply update slot_mapping by += 1.
                # Instead, we pre-allocate the mtp slot_mapping in the model runner
                # (_generate_pcp_mtp_input) and use updated slot_indices
                # to fetch the corresponding slot_mapping in each step.
                num_reject_tokens = (
                    torch.tensor(self.runner.pcp_manager.cu_num_tokens_pcp_full, dtype=torch.int32).to(self.device)
                    - ori_token_indices_to_sample
                    - 1
                )
                num_accept_tokens = query_lens_d.to(self.device) - num_reject_tokens
                ori_seq_len = attn_metadata_i.seq_lens_cpu[:batch_size].clone()
                mtp_slot_mapping = self.runner.pcp_manager.mtp_slot_pad

                # slot_mapping index base offset:
                # scheduled tokens + pre-allocated mtp tokens + accepted tokens
                slot_idx_base = (
                    torch.cat(
                        [
                            torch.tensor([0], dtype=torch.int32, device=self.device),
                            (torch.cumsum(query_lens_d, dim=0)[:-1] * self.pcp_size).to(self.device),
                        ]
                    )
                    + torch.arange(num_decode_reqs, device=self.device)
                    * (self.num_speculative_tokens - 1)
                    * self.pcp_size
                    + (num_accept_tokens - 1) * self.pcp_size
                )
                slot_indices_list = []
                for req_id in range(num_decode_reqs):
                    slot_indices_list.append(
                        torch.arange(slot_idx_base[req_id], slot_idx_base[req_id] + self.pcp_size, device=self.device)
                    )
                slot_indices = torch.cat(slot_indices_list, dim=0)
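                # Illustrative (hypothetical) example: with pcp_size=2, num_speculative_tokens=2,
                # num_decode_reqs=2, query_lens_d=[3, 4] and num_accept_tokens=[1, 2]:
                #   cat([0, 3*2]) + [0, 1]*1*2 + ([1, 2]-1)*2 = [0, 10]
                # so request 0 reads mtp slots [0, 1] and request 1 reads slots [10, 11].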
                # fold block_table (restore it to its original size before it was flattened)
                block_indices = torch.cat(
                    [torch.tensor([0], dtype=torch.int32), torch.cumsum(query_lens_d, dim=0)[:-1]]
                )
                common_attn_metadata.block_table_tensor[:batch_size] = common_attn_metadata.block_table_tensor[
                    block_indices
                ]
                common_attn_metadata.block_table_tensor = common_attn_metadata.block_table_tensor[:batch_size]

            # Copy the old attn_metadata and update
            if not self.parallel_drafting:
                for draft_step in range(1, self.num_speculative_tokens):
                    per_layer_attn_metadata = dict()
                    for attn_group in self.draft_attn_groups:
                        common_attn_metadata, attn_metadata = self.attn_update_stack_num_spec_norm(
                            draft_step,
                            attn_metadata,
                            common_attn_metadata,
                            batch_size,
                            num_input_tokens,
                            used_update_positions,
                            aclgraph_runtime_mode,
                            ori_seq_len,
                            slot_indices,
                            mtp_slot_mapping,
                            attn_group=attn_group,
                        )
                        for layer_name in self.attn_layer_names:
                            per_layer_attn_metadata[layer_name] = attn_metadata
                    multi_steps_attn_metadata.append(per_layer_attn_metadata)
        else:
            # Copy the old attn_metadata and update
            if not self.parallel_drafting:
                for draft_step in range(1, self.num_speculative_tokens):
                    per_layer_attn_metadata = dict()
                    for attn_group in self.draft_attn_groups:
                        common_attn_metadata, attn_metadata = self.attn_update_stack_num_spec_norm(
                            draft_step,
                            attn_metadata,
                            common_attn_metadata,
                            batch_size,
                            num_input_tokens,
                            used_update_positions,
                            aclgraph_runtime_mode,
                            attn_group=attn_group,
                        )
                        for layer_name in self.attn_layer_names:
                            per_layer_attn_metadata[layer_name] = attn_metadata
                    multi_steps_attn_metadata.append(per_layer_attn_metadata)

        token_indices_to_sample_len = token_indices_to_sample.shape[0]
        self.token_indices_to_sample[:token_indices_to_sample_len].copy_(token_indices_to_sample)
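        # Copying into the persistent `self.token_indices_to_sample` buffer keeps the
        # address passed to the (possibly captured) draft graph stable across calls.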

        with set_ascend_forward_context(
            multi_steps_attn_metadata[0],
            self.vllm_config,
            num_tokens=num_input_tokens,
            num_tokens_across_dp=num_tokens_across_dp,
            num_actual_tokens=num_tokens,
            batch_descriptor=batch_descriptor,
            aclgraph_runtime_mode=aclgraph_runtime_mode,
            is_draft_model=True,
            draft_attn_metadatas=multi_steps_attn_metadata,
        ):
            # Reset MOE layer index for forward pass
            forward_context = get_forward_context()
            if forward_context is not None:
                forward_context.moe_layer_index = 0

            draft_token_ids = self._runnable(
                num_input_tokens=num_input_tokens,
                batch_size=batch_size,
                token_indices_to_sample=self.token_indices_to_sample[:token_indices_to_sample_len],
                target_positions=target_positions,
                inputs_embeds=inputs_embeds,
                multi_steps_attn_metadata=multi_steps_attn_metadata,
                num_tokens=num_tokens,
                is_prefill=attn_metadata_i.num_prefills,
            )

            forward_context = get_forward_context()
            if forward_context.cudagraph_runtime_mode == CUDAGraphMode.FULL:
                self._update_full_graph_params(forward_context, num_input_tokens, multi_steps_attn_metadata)
        return draft_token_ids

    def _run_merged_draft(
        self,
        num_input_tokens,
        batch_size,
        token_indices_to_sample,
        target_positions,
        inputs_embeds,
        multi_steps_attn_metadata,
        num_tokens,
        is_prefill=None,
    ) -> torch.Tensor:
        # The lifecycle of `input_ids`, `positions`, `hidden_states` runs through all
        # speculative tokens' proposals. `model_input_ids`, `model_positions` and
        # `model_hidden_states` represent the speculative model inputs.
        model_input_ids = self.input_ids[:num_input_tokens]
        model_positions = self._get_positions(num_input_tokens)

        model_kwargs = {
            "input_ids": model_input_ids,
            "positions": model_positions,
            "inputs_embeds": inputs_embeds,
        }

        if self.pass_hidden_states_to_model:
            model_hidden_states = self.hidden_states[:num_input_tokens]
            model_hidden_states, model_positions = self.maybe_pad_and_reduce(model_hidden_states, model_positions)
            model_kwargs["hidden_states"] = model_hidden_states
            if self.method == "mtp":
                model_kwargs["positions"] = model_positions

        ret_hidden_states = self.model(**model_kwargs)
        if not self.model_returns_tuple():
            last_hidden_states = ret_hidden_states
            hidden_states = last_hidden_states
        else:
            last_hidden_states, hidden_states = ret_hidden_states

        last_hidden_states, model_positions, hidden_states = self.maybe_all_gather_and_unpad(
            last_hidden_states, model_positions, hidden_states
        )

        num_indices = token_indices_to_sample.shape[0]
        if self.pcp_size > 1:
            # remove graph padding before all_gather
            hidden_states = hidden_states[:num_tokens]
            hidden_states = get_pcp_group().all_gather(hidden_states, 0)
            hidden_states = torch.index_select(
                hidden_states, 0, self.runner.pcp_manager.pcp_allgather_restore_idx.gpu[: hidden_states.shape[0]]
            )
            if self.method == "mtp":
                last_hidden_states = hidden_states
            else:
                # eagle and eagle3 need to all_gather last_hidden_states.
                last_hidden_states = last_hidden_states[:num_tokens]
                last_hidden_states = get_pcp_group().all_gather(last_hidden_states, 0)
                last_hidden_states = torch.index_select(
                    last_hidden_states,
                    0,
                    self.runner.pcp_manager.pcp_allgather_restore_idx.gpu[: last_hidden_states.shape[0]],
                )

        if lmhead_tp_enable():
            max_num_reqs_across_dp = (
                self.vllm_config.scheduler_config.max_num_seqs * self.runner.uniform_decode_query_len
            )
            token_indices_to_sample = nn.functional.pad(
                token_indices_to_sample, (0, max_num_reqs_across_dp - num_indices)
            )

        sample_hidden_states = last_hidden_states[token_indices_to_sample]
        logits = self.model.compute_logits(sample_hidden_states)

        if lmhead_tp_enable() and num_indices < logits.shape[0]:
            logits = logits[:num_indices]
            token_indices_to_sample = token_indices_to_sample[:num_indices]

        draft_token_ids = logits.argmax(dim=-1)

        # Early exit if there is only one draft token to be generated.
        if self.num_speculative_tokens == 1 or self.parallel_drafting:
            # [batch_size, num_speculative_tokens]
            return draft_token_ids.view(-1, self.num_speculative_tokens)

        if self.pcp_size * self.dcp_size > 1 and is_prefill:
            draft_token_ids = logits.argmax(dim=-1)
            draft_token_ids_list = []
            for _ in range(self.num_speculative_tokens):
                draft_token_ids_list.append(draft_token_ids)
            return torch.stack(draft_token_ids_list, dim=1)
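        # NOTE: in the pcp/dcp prefill branch above, the same first-step draft fills every
        # speculative slot; the stacked copies are placeholders rather than independently
        # proposed tokens.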

        # Generate the remaining draft tokens.
        draft_token_ids_tensor = torch.zeros(
            (self.num_speculative_tokens, *draft_token_ids.shape), dtype=draft_token_ids.dtype, device=self.device
        )
        draft_token_ids_tensor[0] = draft_token_ids
        if self.uses_mrope:
            positions = self.mrope_positions[:, token_indices_to_sample]
        else:
            positions = self.positions[token_indices_to_sample]
        hidden_states = hidden_states[token_indices_to_sample]
        token_indices_to_sample = self.arange[:batch_size]

        input_batch_size = num_input_tokens if (self.method == "mtp" or self.use_cuda_graph) else batch_size
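        # From the second step onwards each request contributes exactly one token, so the
        # logical batch is `batch_size`; the padded `num_input_tokens` is kept for mtp or
        # captured graphs, whose input width must stay fixed across replays.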

        forward_context = get_forward_context()
        _EXTRA_CTX.num_tokens = input_batch_size
        _EXTRA_CTX.num_accept_tokens = batch_size

        for draft_step in range(self.num_speculative_tokens - 1):
            # Reset MOE layer index for each draft step iteration
            forward_context = get_forward_context()
            if forward_context is not None:
                forward_context.moe_layer_index = 0

            # Update the inputs.
            # Casting to int32 is crucial when the eagle model is compiled;
            # tensor.argmax() returns int64 by default.
            input_ids = draft_token_ids_tensor[draft_step]
            positions += 1

            # NOTE(woosuk): We should handle the case where the draft model
            # generates tokens beyond the max model length. Since it is complex
            # to remove such requests from the batch, we keep them in the batch
            # but adjust the position ids and slot mappings to avoid the
            # out-of-range access during the model execution. The draft tokens
            # generated with this adjustment should be ignored.
            if self.uses_mrope:
                exceeds_max_model_len = positions[0] >= self.vllm_config.model_config.max_model_len
                # Mask out the position ids that exceed the max model length.
                # Otherwise, we may get out-of-range errors in RoPE.
                clamped_positions = torch.where(
                    exceeds_max_model_len.unsqueeze(0), torch.zeros_like(positions), positions
                )
            else:
                exceeds_max_model_len = positions >= self.vllm_config.model_config.max_model_len
                clamped_positions = torch.where(exceeds_max_model_len, 0, positions)

            # copy inputs to buffer for cudagraph
            self.input_ids[:batch_size] = input_ids
            self._set_positions(batch_size, clamped_positions)
            self.hidden_states[:batch_size] = hidden_states
            if self.supports_mm_inputs:
                self.inputs_embeds[:batch_size] = self.model.embed_input_ids(input_ids)

                input_ids = self.input_ids[:input_batch_size]
                inputs_embeds = self.inputs_embeds[:input_batch_size]
            else:
                input_ids = self.input_ids[:input_batch_size]
                inputs_embeds = None

            # Run the model.
            # The lifecycle of `input_ids`, `positions`, `hidden_states` runs through all
            # speculative tokens' proposals. `model_input_ids`, `model_positions` and
            # `model_hidden_states` represent the speculative model inputs.
[Feat] Merge the multi eagle graphs to one graph (#5940)
### What this PR does / why we need it?
This PR merge all steps of draft model in fullgraph mode, to avoid the
synchronize between each graph, reduce the bubble time.
#### Key ideas:
- The "model forward" of the step 0 (first step) and remaining steps are
captured together as a "Callable", rather than capturing each model
individually.
- "update_attn_params" is moved outside the entire graph, meaning that
all "attn_metadata" required by all steps are constructed before
"replay", and the "attn_params" of all steps are updated at once.
- Remove synchronization between the main model graph and draft model
graph.
#### Key params/functions:
- params: draft_attn_metadatas, attn_metadata_multi_steps,
slot_mapping_group
- functions: _run_merged_draft, attn_update_stack_num_spec_norm,
update_attn_params, _propose, dummy_run
### Does this PR introduce _any_ user-facing change?
### How was this patch tested?
- vLLM version: v0.13.0
- vLLM main:
https://github.com/vllm-project/vllm/commit/11b6af5280d6d6dfb8953af16e67b25f819b3be9
Signed-off-by: anon189Ty <Stari_Falcon@outlook.com>
2026-01-23 08:37:02 +08:00
|
|
|
model_input_ids = self.input_ids[:input_batch_size]
|
|
|
|
|
model_positions = self._get_positions(input_batch_size)
|
|
|
|
|
model_hidden_states = self.hidden_states[:input_batch_size]
|
2026-01-08 15:33:52 +08:00
|
|
|
|
2026-02-07 09:16:07 +08:00
|
|
|
model_hidden_states, model_positions = self.maybe_pad_and_reduce(model_hidden_states, model_positions)
|
[Feat] Merge the multi eagle graphs to one graph (#5940)
### What this PR does / why we need it?
This PR merge all steps of draft model in fullgraph mode, to avoid the
synchronize between each graph, reduce the bubble time.
#### Key ideas:
- The "model forward" of the step 0 (first step) and remaining steps are
captured together as a "Callable", rather than capturing each model
individually.
- "update_attn_params" is moved outside the entire graph, meaning that
all "attn_metadata" required by all steps are constructed before
"replay", and the "attn_params" of all steps are updated at once.
- Remove synchronization between the main model graph and draft model
graph.
#### Key params/functions:
- params: draft_attn_metadatas, attn_metadata_multi_steps,
slot_mapping_group
- functions: _run_merged_draft, attn_update_stack_num_spec_norm,
update_attn_params, _propose, dummy_run
### Does this PR introduce _any_ user-facing change?
### How was this patch tested?
- vLLM version: v0.13.0
- vLLM main:
https://github.com/vllm-project/vllm/commit/11b6af5280d6d6dfb8953af16e67b25f819b3be9
Signed-off-by: anon189Ty <Stari_Falcon@outlook.com>
2026-01-23 08:37:02 +08:00
|
|
|
|
2026-02-07 09:16:07 +08:00
|
|
|
forward_context.attn_metadata = (
|
|
|
|
|
multi_steps_attn_metadata[draft_step + 1] if multi_steps_attn_metadata else None
|
|
|
|
|
)
|
2026-03-13 14:07:35 +08:00
|
|
|
|
|
|
|
|
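            # NOTE (added comment): with merged-graph drafting, the attention
            # metadata for every step is built before graph replay; the first
            # draft forward appears to consume entry 0, so loop iteration
            # `draft_step` reads entry `draft_step + 1`.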
            model_kwargs = {
                "input_ids": model_input_ids,
                "positions": model_positions,
                "inputs_embeds": inputs_embeds,
            }
            if self.pass_hidden_states_to_model:
                model_kwargs["hidden_states"] = model_hidden_states

            ret_hidden_states = self.model(**model_kwargs)
            if not self.model_returns_tuple():
                last_hidden_states = ret_hidden_states
                hidden_states = last_hidden_states
            else:
                last_hidden_states, hidden_states = ret_hidden_states

            last_hidden_states, model_positions, hidden_states = self.maybe_all_gather_and_unpad(
                last_hidden_states, model_positions, hidden_states
            )

            num_indices = token_indices_to_sample.shape[0]
            if lmhead_tp_enable():
                max_num_reqs_across_dp = (
                    self.vllm_config.scheduler_config.max_num_seqs * self.runner.uniform_decode_query_len
                )
                token_indices_to_sample = nn.functional.pad(
                    token_indices_to_sample,
                    (0, max_num_reqs_across_dp - num_indices),
                )

            sample_hidden_states = last_hidden_states[token_indices_to_sample]
            logits = self.model.compute_logits(sample_hidden_states)

            if lmhead_tp_enable() and num_indices < logits.shape[0]:
                logits = logits[:num_indices]
                token_indices_to_sample = token_indices_to_sample[:num_indices]

            # TODO(wenlong): get more than one token for tree attention
            hidden_states = hidden_states[:batch_size]
            draft_token_ids = logits.argmax(dim=-1)
            draft_token_ids_tensor[draft_step + 1] = draft_token_ids

        # [batch_size, num_speculative_tokens]
        draft_token_ids = draft_token_ids_tensor.swapaxes(0, 1)
        return draft_token_ids
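    # Illustrative sketch (added; not part of the original class logic): a toy
    # version of the merged draft loop above, showing how each step's greedy
    # argmax token becomes the next step's input and how the final
    # [num_speculative_tokens, batch_size] buffer is transposed for the caller.
    # `toy_draft_forward` is a hypothetical stand-in for the draft model forward;
    # the module-level `torch` import is assumed.
    @staticmethod
    def _sketch_merged_draft_loop() -> torch.Tensor:
        vocab_size, batch_size, num_speculative_tokens = 16, 2, 4
        draft_token_ids_tensor = torch.zeros(num_speculative_tokens, batch_size, dtype=torch.int32)
        # Step-0 tokens would come from the first (pre-loop) draft pass.
        draft_token_ids_tensor[0] = torch.tensor([3, 7], dtype=torch.int32)

        def toy_draft_forward(token_ids: torch.Tensor) -> torch.Tensor:
            # Deterministic fake logits, seeded by the input tokens.
            torch.manual_seed(int(token_ids.sum()))
            return torch.randn(token_ids.shape[0], vocab_size)

        for draft_step in range(num_speculative_tokens - 1):
            logits = toy_draft_forward(draft_token_ids_tensor[draft_step])
            # Greedy sampling; the int32 cast mirrors the production loop above.
            draft_token_ids_tensor[draft_step + 1] = logits.argmax(dim=-1).to(torch.int32)

        # [num_speculative_tokens, batch_size] -> [batch_size, num_speculative_tokens]
        return draft_token_ids_tensor.swapaxes(0, 1)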
    def set_inputs_first_pass(
        self,
        target_token_ids: torch.Tensor,
        next_token_ids: torch.Tensor,
        target_positions: torch.Tensor,
        target_hidden_states: torch.Tensor,
        token_indices_to_sample: torch.Tensor | None,
        cad: CommonAttentionMetadata,
        num_rejected_tokens_gpu: torch.Tensor | None,
        req_scheduled_tokens=None,
        long_seq_metadata=None,
        num_prefill_reqs=0,
        num_decode_reqs=0,
    ) -> tuple[int, torch.Tensor, CommonAttentionMetadata, tuple[Any, Any] | None]:
        if not self.needs_extra_input_slots:
            # Default EAGLE pathway: no reshaping of input tensors needed.
            # Simply rotate the input ids and leave the positions unchanged,
            # inserting the next token ids at the last slot in each request.
            if token_indices_to_sample is None:
                token_indices_to_sample = cad.query_start_loc[1:] - 1

            num_tokens = target_token_ids.shape[0]
            # Shift the input ids by one token.
            # E.g., [a1, b1, b2, c1, c2, c3] -> [b1, b2, c1, c2, c3, c3]
            self.input_ids[: num_tokens - 1] = target_token_ids[1:]
            # Replace the last token with the next token.
            # E.g., [b1, b2, c1, c2, c3, c3] -> [a2, b2, b3, c2, c3, c4]
            self.input_ids[token_indices_to_sample] = next_token_ids

            assert self.runner is not None
            # update pcp related params
            ori_token_indices_to_sample = None
            query_lens_d = None
            if self.pcp_size * self.dcp_size > 1:
                assert long_seq_metadata is not None
                cad.prefill_context_parallel_metadata = long_seq_metadata
                ori_token_indices_to_sample = token_indices_to_sample.clone()
                query_lens_d = self.runner.query_lens[:num_decode_reqs]
            if self.pcp_size > 1:
                # 1. preprocess decode/prefill input_ids & target_hidden_states
                #    decode input_ids: keep unchanged
                #    decode target_hidden_states: remove padding
                #    prefill input_ids: add padding and pcp split
                #    prefill target_hidden_states: pcp split
                assert query_lens_d is not None
                num_tokens_d = query_lens_d.sum().item()
                num_tokens_d_padded = num_tokens_d * self.pcp_size
                input_ids_d = self.input_ids[:num_tokens_d]
                input_ids_p = self.input_ids[num_tokens_d:num_tokens]
                target_hidden_states_d_padded = target_hidden_states[:num_tokens_d_padded]
                if num_tokens_d:
                    # remove padding (from pcp all-gather) in decode part
                    mask_start_loc = torch.cat(
                        [torch.tensor([0], dtype=torch.int32), torch.cumsum(query_lens_d * self.pcp_size, dim=0)[:-1]]
                    )
                    mask_len = query_lens_d
                    mask = []
                    for req_id in range(num_decode_reqs):
                        assert None not in (mask_start_loc, mask_len)
                        mask += list(range(mask_start_loc[req_id], mask_start_loc[req_id] + mask_len[req_id]))
                    target_hidden_states_d = target_hidden_states_d_padded[mask]
                else:
                    target_hidden_states_d = target_hidden_states_d_padded
                target_hidden_states_p = target_hidden_states[num_tokens_d_padded:]
                req_scheduled_tokens_p = {}
                for i, req_id in enumerate(self.runner.input_batch.req_ids):
                    if i >= num_decode_reqs:
                        req_scheduled_tokens_p[req_id] = req_scheduled_tokens[req_id]
                (num_tokens_p, input_ids_p, target_hidden_states_p, max_query_len_p, seq_lens_p, cu_num_tokens_p) = (
                    self._split_pcp_input(req_scheduled_tokens_p, input_ids_p, target_hidden_states_p)
                )
                num_tokens = num_tokens_d + num_tokens_p
                target_positions = target_positions[:num_tokens]
                self.input_ids[:num_tokens].copy_(torch.cat([input_ids_d, input_ids_p], dim=0))
                target_hidden_states = torch.cat([target_hidden_states_d, target_hidden_states_p], dim=0)
                # 2. update sample_indices according to main model
                if num_decode_reqs:
                    token_indices_to_sample[:num_decode_reqs] = self.runner.logits_indices[
                        token_indices_to_sample[:num_decode_reqs]
                    ]
                if num_prefill_reqs:
                    token_indices_to_sample[-num_prefill_reqs:] = self.runner.logits_indices[-num_prefill_reqs:]
                # 3. update attn_metadata params that may be influenced by pcp
                cad.num_actual_tokens = num_tokens
                cad.max_query_len = max(self.decode_threshold, max_query_len_p)
                cad.seq_lens[-num_prefill_reqs:] = seq_lens_p
                cad.seq_lens_cpu[-num_prefill_reqs:] = seq_lens_p
                query_start_loc_p = cu_num_tokens_p[1:] + cad.query_start_loc[num_decode_reqs].item()
                cad.query_start_loc[-num_prefill_reqs:] = query_start_loc_p
                cad.query_start_loc_cpu[-num_prefill_reqs:] = query_start_loc_p

            # copy inputs to buffer for cudagraph
            if self.uses_xdrope_dim > 0 and self.draft_uses_xdrope_dim == 0:
                target_positions = target_positions[0]

            self._set_positions(num_tokens, target_positions)
            self.hidden_states[:num_tokens] = target_hidden_states

            return num_tokens, token_indices_to_sample, cad, (query_lens_d, ori_token_indices_to_sample)
        else:
            assert self.is_rejected_token_mask is not None
            assert self.is_masked_token_mask is not None
            # 1. Call the CopyAndExpandEagleInputs AscendC operator to copy
            #    input_ids and positions into the correct slots in the
            #    preallocated buffers self.input_ids, self.positions.
            batch_size = cad.batch_size()
            total_num_input_tokens = target_token_ids.shape[0]
            total_num_output_tokens = total_num_input_tokens + (self.net_num_new_slots_per_request * batch_size)

            query_start_loc = cad.query_start_loc
            query_end_loc = cad.query_start_loc[1:] - 1
            if num_rejected_tokens_gpu is not None:
                query_end_loc = query_end_loc - num_rejected_tokens_gpu

            (
                out_input_ids,
                out_positions,
                out_is_rejected_token_mask,
                out_is_masked_token_mask,
                token_indices_to_sample,
                out_hidden_state_mapping,
            ) = torch.ops._C_ascend.npu_copy_and_expand_eagle_inputs(
                target_token_ids,
                target_positions.to(torch.int32),
                next_token_ids,
                query_start_loc,
                query_end_loc,
                0,  # padding_token_id
                self.parallel_drafting_token_id,
                self.extra_slots_per_request,
                self.pass_hidden_states_to_model,
                total_num_output_tokens,
            )

            # Copy returned tensors into pre-allocated buffers
            self.input_ids[:total_num_output_tokens].copy_(out_input_ids)
            self.positions[:total_num_output_tokens].copy_(out_positions)
            self.is_rejected_token_mask[:total_num_output_tokens].copy_(out_is_rejected_token_mask)
            self.is_masked_token_mask[:total_num_output_tokens].copy_(out_is_masked_token_mask)
            if self.pass_hidden_states_to_model:
                assert self.parallel_drafting_hidden_state_tensor is not None
                self.hidden_states[out_hidden_state_mapping] = target_hidden_states
                # Use torch.where to avoid DtoH sync from boolean indexing
                mask = self.is_masked_token_mask[:total_num_output_tokens]
                torch.where(
                    mask.unsqueeze(1),  # type: ignore
                    self.parallel_drafting_hidden_state_tensor,
                    self.hidden_states[:total_num_output_tokens],
                    out=self.hidden_states[:total_num_output_tokens],
                )

            # 2. Recompute the slot mapping based on the new positions and
            #    rejection mask.
            # Use the first draft attention group's kv_cache_spec for block_size
            # (all draft layers share the same kv-cache group)
            assert len(self.draft_attn_groups) > 0
            block_size = self.draft_attn_groups[0].kv_cache_spec.block_size

            new_slot_mapping = compute_new_slot_mapping(
                cad=cad,
                new_positions=self.positions[:total_num_output_tokens],
                is_rejected_token_mask=self.is_rejected_token_mask[:total_num_output_tokens],
                block_size=block_size,
                num_new_tokens=self.net_num_new_slots_per_request,
                max_model_len=self.max_model_len,
            )

            # 3. Update the common attention metadata with the new (meta)data
            new_cad = extend_all_queries_by_N(
                cad,
                N=self.net_num_new_slots_per_request,
                arange=self.arange,
                new_slot_mapping=new_slot_mapping,
            )

            return total_num_output_tokens, token_indices_to_sample, new_cad, None
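    # Illustrative sketch (added): the shift-and-scatter performed by the default
    # pathway of `set_inputs_first_pass` above, on small concrete tensors. Values
    # are made up; only the indexing pattern matches the production code. Uses the
    # module-level `torch` import.
    @staticmethod
    def _sketch_shift_and_scatter_inputs() -> torch.Tensor:
        # Two requests packed back-to-back: [a1, a2, a3 | b1, b2]
        target_token_ids = torch.tensor([11, 12, 13, 21, 22], dtype=torch.int32)
        next_token_ids = torch.tensor([14, 23], dtype=torch.int32)  # sampled by the target model
        query_start_loc = torch.tensor([0, 3, 5], dtype=torch.int64)

        input_ids = torch.empty_like(target_token_ids)
        # Shift left by one: [a2, a3, b1 | b2, b2]
        input_ids[: target_token_ids.shape[0] - 1] = target_token_ids[1:]
        input_ids[-1] = target_token_ids[-1]
        # Last slot of each request receives the freshly sampled token:
        # [a2, a3, a4 | b2, b3]
        token_indices_to_sample = query_start_loc[1:] - 1
        input_ids[token_indices_to_sample] = next_token_ids
        return input_ids  # tensor([12, 13, 14, 22, 23], dtype=torch.int32)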
    def model_returns_tuple(self) -> bool:
        return self.method not in ("mtp", "draft_model")
    def attn_update_stack_num_spec_norm(
        self,
        # `draft_step` must start from `1`, not `0`
        draft_step,
        old_attn_metadata,
        old_common_metadata,
        batch_size,
        input_batch_size,
        used_update_positions,
        aclgraph_runtime_mode,
        ori_seq_len=None,
        slot_indices=None,
        mtp_slot_mapping=None,
        attn_group=None,
    ):
        assert draft_step > 0
        assert attn_group is not None, "vllm-ascend v0.17.0rc1 requires attn_group"
        common_attn_metadata = self.shallow_copy_metadata(old_common_metadata)

        if draft_step == 1:
            if aclgraph_runtime_mode == CUDAGraphMode.FULL:
                common_attn_metadata.num_reqs = input_batch_size
                common_attn_metadata.block_table_tensor = self._pad_tensor(
                    common_attn_metadata.block_table_tensor, input_batch_size
                )
                common_attn_metadata.seq_lens = self._pad_tensor(common_attn_metadata.seq_lens, input_batch_size)
                common_attn_metadata.seq_lens_cpu = self._pad_tensor(
                    common_attn_metadata.seq_lens_cpu, input_batch_size
                )
                common_attn_metadata.num_computed_tokens_cpu = self._pad_tensor(
                    common_attn_metadata.num_computed_tokens_cpu, input_batch_size
                )
                common_attn_metadata.query_start_loc = self.arange[: input_batch_size + 1]
                common_attn_metadata.query_start_loc_cpu = torch.from_numpy(
                    self.token_arange_np[: input_batch_size + 1]
                ).clone()
            else:
                common_attn_metadata.query_start_loc = self.arange[: batch_size + 1]
                common_attn_metadata.query_start_loc_cpu = torch.from_numpy(
                    self.token_arange_np[: batch_size + 1]
                ).clone()

            common_attn_metadata.num_actual_tokens = batch_size
            common_attn_metadata.max_query_len = 1
            common_attn_metadata.decode_token_per_req = 1
            common_attn_metadata.attn_state = (
                AscendAttentionState.SpecDecoding if self.method == "mtp" else AscendAttentionState.ChunkedPrefill
            )
            common_attn_metadata.graph_pad_size = -1
            common_attn_metadata.num_input_tokens = input_batch_size
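        # NOTE (added comment): when a FULL graph was captured, the metadata above
        # is padded up to `input_batch_size`, which appears to be the batch size the
        # graph was captured with; the per-request tensors replayed by the graph
        # must match that captured size.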
        # The loop part
        used_update_positions += 1

        # Clone the data so that, when the merged graph computes the data for
        # positions 2 and 3, it does not affect position 1.
        # FIXME(lilinsiman)
        common_attn_metadata.seq_lens = common_attn_metadata.seq_lens.clone()
        common_attn_metadata.seq_lens_cpu = common_attn_metadata.seq_lens_cpu.clone()
        common_attn_metadata.num_computed_tokens_cpu = common_attn_metadata.num_computed_tokens_cpu.clone()
        common_attn_metadata.positions = common_attn_metadata.positions.clone()

        # NOTE(woosuk): We should handle the case where the draft model
        # generates tokens beyond the max model length. Since it is complex
        # to remove such requests from the batch, we keep them in the batch
        # but adjust the position ids and slot mappings to avoid the
        # out-of-range access during the model execution. The draft tokens
        # generated with this adjustment should be ignored.
        if self.uses_mrope:
            exceeds_max_model_len = used_update_positions[0] >= self.max_model_len
            # Mask out the position ids that exceed the max model length.
            # Otherwise, we may get out-of-range error in RoPE.
            clamped_positions = torch.where(
                exceeds_max_model_len.unsqueeze(0), torch.zeros_like(used_update_positions), used_update_positions
            )
        else:
            exceeds_max_model_len = used_update_positions >= self.max_model_len
            clamped_positions = torch.where(exceeds_max_model_len, 0, used_update_positions)

        # For data integrity when async scheduling, we shouldn't use in-place
        # operations in case they are modified in the next step's `prepare_input`
        # of the main model.
        # Increment the sequence lengths.
        common_attn_metadata.seq_lens[:batch_size] += 1
        # For the requests that exceed the max model length, we set the
        # sequence length to 1 to minimize their overheads in attention.
        common_attn_metadata.seq_lens[:batch_size].masked_fill_(exceeds_max_model_len, 1)

        common_attn_metadata.seq_lens_cpu[:batch_size] = common_attn_metadata.seq_lens_cpu[:batch_size] + 1
        exceeds_mask = common_attn_metadata.seq_lens_cpu[:batch_size] >= self.max_model_len
        common_attn_metadata.seq_lens_cpu[:batch_size].masked_fill_(exceeds_mask, 1)
        common_attn_metadata.num_computed_tokens_cpu[:batch_size] += 1
        if self.uses_mrope:
            common_attn_metadata.positions[:batch_size].copy_(clamped_positions[0])
        else:
            common_attn_metadata.positions[:batch_size].copy_(clamped_positions)

        if self.pcp_size * self.dcp_size > 1:
            num_computed_tokens_of_pcp_dcp = self.runner.pcp_manager._get_cp_local_seq_lens(
                ori_seq_len + draft_step + 1,
                self.pcp_size,
                self.dcp_size,
                self.runner.parallel_config.cp_kv_cache_interleave_size,
            )
            cp_seq_len = num_computed_tokens_of_pcp_dcp[:, self.pcp_rank, self.dcp_rank]
            # update slot_mapping
            slot_indices += self.pcp_size
            slot_mapping = mtp_slot_mapping[slot_indices]
            self.slot_mapping_group[draft_step][: batch_size * self.pcp_size] = slot_mapping
            common_attn_metadata.slot_mapping = self.slot_mapping_group[draft_step]
        else:
            # NOTE: In vllm, `block_size = attn_metadata_builder.kv_cache_spec.block_size`.
            # However, in vllm-ascend, the above value can be a multiple of `kernel_block_size`,
            # which is not correct for computing `slot_mapping` below.
            block_size = self.kernel_block_size

            # Compute the slot mapping.
            if self.uses_mrope:
                block_numbers = clamped_positions[0] // block_size
            else:
                block_numbers = clamped_positions // block_size
            block_ids = old_common_metadata.block_table_tensor.gather(dim=1, index=block_numbers.view(-1, 1))
            block_ids = block_ids.view(-1)
            if self.uses_mrope:
                slot_mapping = block_ids * block_size + clamped_positions[0] % block_size
            else:
                slot_mapping = block_ids * block_size + clamped_positions % block_size

            # Mask out the slot mappings that exceed the max model length.
            # Otherwise, the KV cache will be inadvertently updated with the
            # padding tokens.
            slot_mapping.masked_fill_(exceeds_max_model_len, PADDING_SLOT_ID)
            self.slot_mapping_group[draft_step][: slot_mapping.shape[0]].copy_(slot_mapping.to(torch.int32))
            self.slot_mapping_group[draft_step][slot_mapping.shape[0] :].fill_(PADDING_SLOT_ID)
            # Point attn_metadata.slot_mapping at self.slot_mapping_group[draft_step]
            common_attn_metadata.slot_mapping = self.slot_mapping_group[draft_step]

        attn_metadata_builder = attn_group.get_metadata_builder()

        attn_metadata = attn_metadata_builder.build_for_drafting(
            common_attn_metadata=common_attn_metadata,
            draft_index=draft_step,
        )

        if self.pcp_size * self.dcp_size > 1:
            if self.vllm_config.model_config.use_mla:
                if getattr(attn_metadata, "decode", None):
                    attn_metadata.decode.cp_seq_len = cp_seq_len
            else:
                attn_metadata.decode_meta.num_computed_tokens_of_pcp_dcp = num_computed_tokens_of_pcp_dcp

        return common_attn_metadata, attn_metadata
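    # Illustrative sketch (added): how a per-step slot mapping is derived from
    # clamped positions and a block table, as in the non-pcp branch of
    # `attn_update_stack_num_spec_norm` above. Shapes, values and the
    # `padding_slot_id` stand-in for PADDING_SLOT_ID are toy assumptions; the
    # module-level `torch` import is used.
    @staticmethod
    def _sketch_slot_mapping_from_positions() -> torch.Tensor:
        block_size = 4
        max_model_len = 16
        padding_slot_id = -1  # stand-in for PADDING_SLOT_ID
        positions = torch.tensor([5, 9, 16], dtype=torch.int64)  # last one exceeds max_model_len
        # One row of physical block ids per request.
        block_table = torch.tensor([[7, 3, 0, 0], [2, 8, 1, 0], [4, 6, 9, 5]], dtype=torch.int64)

        exceeds_max_model_len = positions >= max_model_len
        clamped_positions = torch.where(exceeds_max_model_len, 0, positions)

        block_numbers = clamped_positions // block_size
        block_ids = block_table.gather(dim=1, index=block_numbers.view(-1, 1)).view(-1)
        slot_mapping = block_ids * block_size + clamped_positions % block_size
        # Requests past the max model length must not write into the KV cache.
        slot_mapping.masked_fill_(exceeds_max_model_len, padding_slot_id)
        return slot_mapping  # tensor([13, 5, -1])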
    def prepare_next_token_ids_padded(
        self,
        common_attn_metadata: CommonAttentionMetadata,
        sampled_token_ids: torch.Tensor,
        requests: dict[str, CachedRequestState],
        gpu_input_batch: InputBatch,
        discard_request_indices: torch.Tensor,
        num_discarded_requests: int,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """
        This function is used to prepare the inputs for speculative decoding.
        It calculates the next token ids and the number of valid sampled tokens
        for each request, considering the "discarded" requests whose next token
        is not sampled and comes from `request.get_token_id()` instead.
        It also accounts for the rejected tokens in `sampled_token_ids`.
        This function must use device functions to operate on the inputs, and
        should not introduce any blocking CPU-GPU synchronization.
        """
        # TODO(Ben): Combine this into a custom fused kernel

        # Precompute get_token_id for when there is no valid next token
        num_reqs = gpu_input_batch.num_reqs
        self.backup_next_token_ids.np[:num_reqs] = np.array(
            [
                requests[gpu_input_batch.req_ids[i]].get_token_id(common_attn_metadata.seq_lens_cpu[i].item())
                for i in range(num_reqs)
            ]
        )
        self.backup_next_token_ids.copy_to_gpu(num_reqs)

        # Mask out the sampled tokens indices that should not be sampled.
        discard_sampled_tokens_req_indices = discard_request_indices[:num_discarded_requests]

        valid_sampled_token_ids_gpu = sampled_token_ids.clone()
        valid_sampled_token_ids_gpu.index_fill_(0, discard_sampled_tokens_req_indices, -1)

        # Generate a mask for all valid tokens within those requests
        valid_mask = (valid_sampled_token_ids_gpu != -1) & (valid_sampled_token_ids_gpu < gpu_input_batch.vocab_size)

        # Count the number of valid tokens in each request
        valid_sampled_tokens_count = valid_mask.sum(dim=1)

        # Get the rightmost valid index per row
        last_valid_indices = valid_sampled_tokens_count - 1
        last_valid_indices_safe = torch.clamp(last_valid_indices, min=0)

        # Get last valid token from each row
        # (assume undefined state where there is no valid token)
        selected_tokens = torch.gather(valid_sampled_token_ids_gpu, 1, last_valid_indices_safe.unsqueeze(1)).squeeze(1)

        # Use last token if valid, pre-computed backup if not
        batch_size = valid_sampled_token_ids_gpu.shape[0]
        next_token_ids = torch.where(
            last_valid_indices != -1,
            selected_tokens,
            self.backup_next_token_ids.gpu[:batch_size],
        )

        return next_token_ids, valid_sampled_tokens_count
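    # Illustrative sketch (added): the mask/gather/where pattern used by
    # `prepare_next_token_ids_padded` above to pick each request's last accepted
    # token without a CPU-GPU sync, falling back to a precomputed backup token
    # when a request has no valid sample. All values are toy ones; the
    # module-level `torch` import is used.
    @staticmethod
    def _sketch_pick_last_valid_token() -> torch.Tensor:
        vocab_size = 100
        sampled_token_ids = torch.tensor(
            [[11, 12, -1],   # 2 accepted tokens
             [21, -1, -1],   # 1 accepted token
             [-1, -1, -1]],  # fully discarded request
        )
        backup_next_token_ids = torch.tensor([0, 0, 99])

        valid_mask = (sampled_token_ids != -1) & (sampled_token_ids < vocab_size)
        valid_sampled_tokens_count = valid_mask.sum(dim=1)
        last_valid_indices = valid_sampled_tokens_count - 1
        last_valid_indices_safe = torch.clamp(last_valid_indices, min=0)
        selected_tokens = torch.gather(sampled_token_ids, 1, last_valid_indices_safe.unsqueeze(1)).squeeze(1)
        # Rows with no valid token fall back to the backup ids.
        next_token_ids = torch.where(last_valid_indices != -1, selected_tokens, backup_next_token_ids)
        return next_token_ids  # tensor([12, 21, 99])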
def prepare_inputs(
|
|
|
|
|
self,
|
|
|
|
|
common_attn_metadata: CommonAttentionMetadata,
|
|
|
|
|
sampled_token_ids: list[list[int]],
|
|
|
|
|
num_draft_tokens: list[int],
|
|
|
|
|
) -> tuple[CommonAttentionMetadata, torch.Tensor]:
|
|
|
|
|
"""
|
|
|
|
|
This function is used to prepare the inputs for speculative decoding.
|
2025-10-25 09:49:42 +08:00
|
|
|
It updates to the common_attn_metadata to account for the rejected
|
|
|
|
|
tokens (and newly sampled tokens). It also returns the token indices
|
|
|
|
|
of the tokens that should be fed to the speculator.
|
|
|
|
|
"""
|
|
|
|
|
# E.g.
|
|
|
|
|
# common_attn_metadata.query_start_loc{_cpu}:
|
2025-12-16 22:06:40 +08:00
|
|
|
# [0, q1, q1 + q2, q1 + q2 + q3]
|
2025-10-25 09:49:42 +08:00
|
|
|
# common_attn_metadata.seq_lens{_cpu}: [s1, s2, s3]
|
|
|
|
|
# num_rejected_tokens: [n1, n2, n3]
|
|
|
|
|
# This function computes the intermediate values:
|
|
|
|
|
# num_tokens_per_req: [q1 - n1, q2 - n2, q3 - n3]
|
|
|
|
|
# And returns:
|
|
|
|
|
# common_attn_metadata.query_start_loc{_cpu}:
|
2025-12-16 22:06:40 +08:00
|
|
|
# [0, q1 - n1, q1 + q2 - n1 - n2, q1 + q2 + q3 - n1 - n2 - n3]
|
2025-10-25 09:49:42 +08:00
|
|
|
# common_attn_metadata.seq_lens{_cpu}:
|
2025-12-16 22:06:40 +08:00
|
|
|
# [s1 - n1 + 1, s2 - n2 + 1, s3 - n3 + 1]
|
2025-10-25 09:49:42 +08:00
|
|
|
# token_indices: [0, 1, ..., q1 - n1 - 1,
|
2025-12-16 22:06:40 +08:00
|
|
|
# q1, q1 + 1, ..., q1 + q2 - n2 - 1,
|
|
|
|
|
# q1 + q2, q1 + q2 + 1, ..., q1 + q2 + q3 - n3 - 1]
|
|
|
|
|
|
|
|
|
|
num_actual_reqs = len(num_draft_tokens)
|
|
|
|
|
num_rejected_tokens = [
|
2026-02-07 09:16:07 +08:00
|
|
|
n + 1 - len(sampled_token_ids[i]) if n > 0 else 0 for i, n in enumerate(num_draft_tokens)
|
2025-12-16 22:06:40 +08:00
|
|
|
]
|
2026-02-07 09:16:07 +08:00
|
|
|
num_rejected_tokens = torch.tensor(num_rejected_tokens, dtype=torch.int32)
|
2025-12-16 22:06:40 +08:00
|
|
|
|
|
|
|
|
device = common_attn_metadata.query_start_loc.device
|
2026-02-07 09:16:07 +08:00
|
|
|
query_start_loc_cpu = common_attn_metadata.query_start_loc_cpu[: num_actual_reqs + 1]
|
2025-12-16 22:06:40 +08:00
|
|
|
seq_lens_cpu = common_attn_metadata.seq_lens_cpu[:num_actual_reqs]
|
|
|
|
|
new_seq_lens_cpu = seq_lens_cpu - num_rejected_tokens
|
2025-10-25 09:49:42 +08:00
|
|
|
|
|
|
|
|

        # [0, q1, q1 + q2, q1 + q2 + q3] -> [q1, q2, q3]
        new_query_len_per_req = query_start_loc_cpu[1:] - query_start_loc_cpu[:-1]
        # [q1, q2, q3] -> [q1 - n1, q2 - n2, q3 - n3]
        new_num_tokens_per_req = new_query_len_per_req - num_rejected_tokens
        new_num_tokens_per_req_np = new_num_tokens_per_req.numpy()

        # [q1 - n1, q2 - n2, q3 - n3] ->
        # [0, q1 - n1, q1 + q2 - n1 - n2, q1 + q2 + q3 - n1 - n2 - n3]
        new_query_start_loc_cpu = torch.zeros(
            query_start_loc_cpu.shape,
            dtype=torch.int32,
            pin_memory=is_pin_memory_available(),
        )
        new_query_start_loc_np = new_query_start_loc_cpu.numpy()
        np.cumsum(new_num_tokens_per_req_np, out=new_query_start_loc_np[1:])
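        # np.cumsum writes its result directly into the (optionally pinned) CPU buffer
        # above, with index 0 left as 0, so new_query_start_loc_cpu is filled in place
        # without an extra host-side copy.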

        total_num_tokens = new_query_start_loc_np[-1]
        # Example assuming num_tokens_per_req_np = [2, 4, 3]
        # this implies that `new_query_start_locs` is:
        # [0, 2, 6, 9] ->
        # [0, 0, 2, 2, 2, 2, 6, 6, 6]
        #  _r1_  ____r2____  ___r3__
        new_query_start_locs_expanded = np.repeat(new_query_start_loc_np[:-1], new_num_tokens_per_req_np)
        # [0, 1, 2, 3, 4, 5, 6, 7, 8] ->
        # [0, 1, 0, 1, 2, 3, 0, 1, 2]
        #  _r1_  ____r2____  ___r3__
        token_offsets = self.token_arange_np[:total_num_tokens] - new_query_start_locs_expanded

        # Expand starting positions to match token pattern
        # [0, q1, q1 + q2] ->
        # [0, 0, q1, q1, q1, q1, q1 + q2, q1 + q2, q1 + q2]
        #  _r1_  _____r2_______  ___________r3____________
        old_query_start_locs_expanded = np.repeat(query_start_loc_cpu[:-1].numpy(), new_num_tokens_per_req_np)
        # Final token indices are:
        # [0, 1,                                    // req 1
        #  q1 + 0, q1 + 1, q1 + 2, q1 + 3,          // req 2
        #  q1 + q2 + 0, q1 + q2 + 1, q1 + q2 + 2]   // req 3
        token_indices_np = token_offsets + old_query_start_locs_expanded
        token_indices = torch.from_numpy(token_indices_np).to(device, non_blocking=True)

        common_attn_metadata.slot_mapping[: token_indices.shape[0]].copy_(
            common_attn_metadata.slot_mapping[token_indices]
        )
        common_attn_metadata.slot_mapping[token_indices.shape[0] :].fill_(-1)
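        # The copy above compacts the kept tokens' slots to the front of slot_mapping;
        # the remaining tail entries are filled with -1 to mark them as unused.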

        # NOTE: Currently positions and seq_lens are not used in attn forward,
        # so we do not need to fix them. But if they are used in the future,
        # we should fix them.
        spec_common_attn_metadata = AscendCommonAttentionMetadata(
            query_start_loc=new_query_start_loc_cpu.to(device, non_blocking=True),
            query_start_loc_cpu=new_query_start_loc_cpu,
            seq_lens=new_seq_lens_cpu.to(device, non_blocking=True),
            seq_lens_cpu=new_seq_lens_cpu,
            num_computed_tokens_cpu=common_attn_metadata.num_computed_tokens_cpu,
            num_reqs=common_attn_metadata.num_reqs,
            num_actual_tokens=total_num_tokens,
            num_input_tokens=common_attn_metadata.num_input_tokens,
            max_query_len=new_query_len_per_req.max().item(),
            block_table_tensor=common_attn_metadata.block_table_tensor,
            slot_mapping=common_attn_metadata.slot_mapping,
            actual_seq_lengths_q=self.runner.actual_seq_lengths_q,
            positions=common_attn_metadata.positions[token_indices],
            attn_state=self.runner.attn_state,
            decode_token_per_req=self.runner.decode_token_per_req,
            max_seq_len=0,
        )
        return spec_common_attn_metadata, token_indices

    def prepare_inputs_padded(
        self,
        common_attn_metadata: CommonAttentionMetadata,
        spec_decode_metadata: SpecDecodeMetadata,
        valid_sampled_tokens_count: torch.Tensor,
    ) -> tuple[CommonAttentionMetadata, torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        This function is used to prepare the inputs for speculative decoding.
        It updates the common_attn_metadata for speculative decoding,
        but does not consider the rejected tokens. Instead, all tokens
        are included as inputs to the speculator, with the rejected tokens
        used as padding and filtered out later by `token_indices_to_sample`.
        No blocking CPU operations should be introduced in this function.
        """
        if HAS_TRITON:
            num_reqs = common_attn_metadata.num_reqs
            device = valid_sampled_tokens_count.device

            token_indices_to_sample = torch.empty((num_reqs,), dtype=torch.int32, device=device)
            num_rejected_tokens_gpu = torch.empty((num_reqs,), dtype=torch.int32, device=device)
            num_blocks_needed = triton.cdiv(num_reqs, _PREPARE_INPUTS_BLOCK_SIZE)
            num_vector_core = get_vectorcore_num()
            grid_size = min(num_blocks_needed, num_vector_core)
            grid = (grid_size,)

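            # The kernel below computes, per request, the number of rejected draft
            # tokens and the index of the token to sample from, entirely on device,
            # which keeps this path free of blocking CPU operations.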
            prepare_inputs_padded_kernel[grid](
                spec_decode_metadata.cu_num_draft_tokens,
                valid_sampled_tokens_count,
                common_attn_metadata.query_start_loc,
                token_indices_to_sample,
                num_rejected_tokens_gpu,
                num_reqs,
                BLOCK_SIZE=_PREPARE_INPUTS_BLOCK_SIZE,
            )
        else:
            num_draft_tokens_gpu = torch.cat(
                [
                    spec_decode_metadata.cu_num_draft_tokens[0:1],
                    spec_decode_metadata.cu_num_draft_tokens[1:] - spec_decode_metadata.cu_num_draft_tokens[:-1],
                ]
            )

            num_rejected_tokens_gpu = torch.where(
                num_draft_tokens_gpu > 0,
                num_draft_tokens_gpu + 1 - valid_sampled_tokens_count,
                torch.zeros_like(num_draft_tokens_gpu),
            )

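            # query_start_loc[1:] - 1 gives the last token position of each request;
            # stepping back by the rejected count lands on the last accepted token,
            # i.e. the position the speculator should sample from.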
            token_indices_to_sample = common_attn_metadata.query_start_loc[1:] - 1 - num_rejected_tokens_gpu

        query_start_loc_cpu = common_attn_metadata.query_start_loc_cpu

        new_query_len_per_req = query_start_loc_cpu[1:] - query_start_loc_cpu[:-1]

        total_num_tokens = query_start_loc_cpu[-1].item()
        token_indices = self.arange[:total_num_tokens]

        # NOTE: Currently positions and seq_lens are not used in attn forward,
        # so we do not need to fix them. But if they are used in the future,
        # we should fix them.
        spec_common_attn_metadata = AscendCommonAttentionMetadata(
            query_start_loc=common_attn_metadata.query_start_loc,
            query_start_loc_cpu=query_start_loc_cpu,
            seq_lens_cpu=common_attn_metadata.seq_lens_cpu,
            num_reqs=common_attn_metadata.num_reqs,
            num_actual_tokens=common_attn_metadata.num_actual_tokens if self.pcp_size > 1 else total_num_tokens,
            num_input_tokens=common_attn_metadata.num_input_tokens,
            max_query_len=new_query_len_per_req.max().item(),
            actual_seq_lengths_q=self.runner.actual_seq_lengths_q,
            block_table_tensor=common_attn_metadata.block_table_tensor,
            slot_mapping=common_attn_metadata.slot_mapping,
            positions=common_attn_metadata.positions,
            attn_state=self.runner.attn_state,
            decode_token_per_req=self.runner.decode_token_per_req,
            num_computed_tokens_cpu=common_attn_metadata.num_computed_tokens_cpu,
            seq_lens=common_attn_metadata.seq_lens,
            max_seq_len=0,
        )

        return spec_common_attn_metadata, token_indices, token_indices_to_sample, num_rejected_tokens_gpu

    def _split_pcp_input(self, req_scheduled_tokens, input_ids, target_hidden_states):
        """
        Split prefill input_ids and target_hidden_states across the pcp group.
        1. input_ids padding: [t0, t1, t2, t3, t4, t5] -> [t0, t1, t2, t3, t4, t5, pad, pad]
        2. split input_ids: pcp0 [t0, t1, pad, pad], pcp1 [t2, t3, t4, t5]
        3. split target_hidden_states (which already include the pcp padding):
           [h0, h1, h2, h3, h4, h5, pad, pad] -> pcp0 [h0, h1, pad, pad], pcp1 [h2, h3, h4, h5]
        4. Also update max_query_len, seq_lens, and cu_num_tokens according to the pcp split.
        """
        if len(req_scheduled_tokens) == 0:
            # no prefill inputs to split, return empty result
            return (
                0,
                torch.zeros([0], device="npu"),
                torch.zeros([0, target_hidden_states.size(1)], device="npu"),
                0,
                torch.zeros([0]),
                torch.tensor([0], dtype=torch.int32),
            )

        def _pcp_pad_and_split(num_tokens):
            num_pcp_padded_scheduled_tokens = cdiv(num_tokens, 2 * self.pcp_size) * 2 * self.pcp_size
            pcp_pad = num_pcp_padded_scheduled_tokens - num_tokens
            chunk_size = num_pcp_padded_scheduled_tokens // (2 * self.pcp_size)

            # split position_ids (and use split position_ids to split input_ids afterwards)
            req_position_cp: list[int] = []
            req_position_cp.extend(self.full_indices[self.pcp_rank * chunk_size : (self.pcp_rank + 1) * chunk_size])
            req_position_cp.extend(
                self.full_indices[
                    num_pcp_padded_scheduled_tokens - (self.pcp_rank + 1) * chunk_size : num_pcp_padded_scheduled_tokens
                    - self.pcp_rank * chunk_size
                ]
            )

            return req_position_cp, num_pcp_padded_scheduled_tokens, pcp_pad
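        # Illustrative example (assuming pcp_size = 2): a request with 6 tokens is
        # padded to 8 (chunk_size = 2); pcp rank 0 takes positions [0, 1] and [6, 7],
        # rank 1 takes [2, 3] and [4, 5], matching the head/tail split in the docstring.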

        num_pcp_scheduled_tokens = []
        ori_start_index = 0
        pad_start_index = 0
        pcp_split_input_ids_list = []
        pcp_split_hidden_states_list = []
        for ori_num_tokens in req_scheduled_tokens.values():
            req_position_pcp, num_pcp_padded_scheduled_tokens, num_pcp_pad = _pcp_pad_and_split(ori_num_tokens)
            actual_num_tokens = len(req_position_pcp)
            num_pcp_scheduled_tokens.append(actual_num_tokens)
            pad_input_ids = F.pad(input_ids[ori_start_index : ori_start_index + ori_num_tokens], (0, num_pcp_pad))
            ori_start_index += ori_num_tokens
            pcp_chunk_indices = [pad_start_index + pos for pos in req_position_pcp]
            pcp_split_input_ids = pad_input_ids[req_position_pcp]
            pcp_split_hidden_states = target_hidden_states[pcp_chunk_indices]
            pcp_split_input_ids_list.append(pcp_split_input_ids)
            pcp_split_hidden_states_list.append(pcp_split_hidden_states)
            pad_start_index += num_pcp_padded_scheduled_tokens
        num_tokens = sum(num_pcp_scheduled_tokens)
        input_ids = torch.cat(pcp_split_input_ids_list)
        target_hidden_states = torch.cat(pcp_split_hidden_states_list, dim=0)
        max_query_len = max(num_pcp_scheduled_tokens)
        seq_lens = torch.tensor(num_pcp_scheduled_tokens, dtype=torch.int32)
        cu_num_tokens = torch.tensor(np.insert(np.cumsum(np.array(num_pcp_scheduled_tokens)), 0, 0))
        return num_tokens, input_ids, target_hidden_states, max_query_len, seq_lens, cu_num_tokens

    # update full-graph params for one spec token
    def _update_full_graph_params(self, forward_context, num_tokens, draft_attn_metadatas=None):
        assert len(self.draft_attn_groups) > 0
        attn_backend = self.draft_attn_groups[0].backend
        update_full_graph_params(
            attn_backend,
            self.update_stream,
            forward_context,
            num_tokens,
            self.vllm_config,
            self.vllm_config.speculative_config,
            draft_attn_metadatas=draft_attn_metadatas,
        )

    # pad (or truncate) a tensor to the desired size along dim 0
    def _pad_tensor(self, tensor, desired_size):
        pad_size = desired_size - tensor.shape[0]
        if pad_size > 0:
            pad = [0] * (2 * tensor.dim() - 1) + [pad_size]
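            # F.pad reads the pad list as (before, after) pairs starting from the last
            # dimension, so this pads only the trailing side of dim 0; e.g. for a 2-D
            # tensor the list is [0, 0, 0, pad_size].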
            tensor = F.pad(tensor, pad, mode="constant", value=0)
        else:
            tensor = tensor[:desired_size]
        return tensor

    def maybe_pad_and_reduce(
        self,
        hidden_states: torch.Tensor,
        positions: torch.Tensor,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        if self.is_multimodal_model and _EXTRA_CTX.flash_comm_v1_enabled:
            return hidden_states, positions
        if self.method == "mtp":
            if _EXTRA_CTX.flash_comm_v1_enabled:
                hidden_states = torch.ops.vllm.maybe_pad_and_reduce(hidden_states)
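                # positions is 1-D; the custom pad/reduce op is assumed to expect a
                # trailing feature dimension, so add one temporarily and remove it after.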
                positions = positions.unsqueeze(-1)
                positions = torch.ops.vllm.maybe_pad_and_reduce(positions)
                positions = positions.squeeze(-1)
        else:
            if _EXTRA_CTX.flash_comm_v1_enabled:
                hidden_states = split_inputs_tp_to_sp(hidden_states, hidden_states)
        return hidden_states, positions

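    # Roughly the inverse of maybe_pad_and_reduce: when shared-expert DP (mtp) or
    # flash comm v1 is active, all-gather the hidden states back to the full token
    # dimension and drop the padding before they are consumed downstream.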
    def maybe_all_gather_and_unpad(
        self,
        last_hidden_states: torch.Tensor,
        positions: torch.Tensor,
        hidden_states: torch.Tensor | None = None,
    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor | None]:
        if self.method == "mtp":
            if self.enable_shared_expert_dp:
                last_hidden_states = torch.ops.vllm.maybe_all_gather_and_maybe_unpad(
                    last_hidden_states.contiguous(), True
                )
                positions = torch.ops.vllm.maybe_all_gather_and_maybe_unpad(positions.contiguous(), True)
                if hidden_states is not None:
                    hidden_states = last_hidden_states
        else:
            if _EXTRA_CTX.flash_comm_v1_enabled:
                last_hidden_states = torch.ops.vllm.maybe_all_gather_and_maybe_unpad(
                    last_hidden_states.contiguous(), True
                )
                if hidden_states is not None:
                    hidden_states = torch.ops.vllm.maybe_all_gather_and_maybe_unpad(hidden_states.contiguous(), True)
        return last_hidden_states, positions, hidden_states


class AscendEagleProposer(SpecDecodeBaseProposer):
    def __init__(
        self,
        vllm_config: VllmConfig,
        device: torch.device,
        runner=None,
    ):
        super().__init__(
            vllm_config,
            device,
            pass_hidden_states_to_model=True,
            runner=runner,
        )