[Feature] Support using full graph with EAGLE (#5118)

### What this PR does / why we need it?
    
This PR adds support for running the EAGLE draft model in full-graph mode.

Change list:
1. Distinguish between processing graph_params and draft_graph_params in
attention_v1.
2. Adapt the full-graph mode in eagle_proposer, including:
    1) When full graph is enabled, wrap the draft model with the FullGraph
    wrapper at model load time (see the sketch after this list).
    2) Build new metadata, set the running mode to FULL, and mark the
    attention metadata for update in dummy_run when in full-graph mode.
    3) Fix and fill attn_metadata fields such as attn_metadata.slot_mapping.
    4) Add a batch descriptor.
    5) Set the running mode and trigger the metadata update.
3. Rename is_mtp_model to is_draft_model, and add the workspace update.
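
For item 2.1, here is a minimal, self-contained sketch of the wrap-at-load-time pattern; `GraphMode`, `FullGraphWrapper`, and `wrap_draft_model` are illustrative names, not the PR's actual classes:

```python
# Illustrative sketch (not the PR's exact code) of step 2.1: wrapping the
# draft model for full-graph execution at load time. In the real code the
# wrapper captures an ACL graph on first run for a given batch descriptor
# and replays it afterwards; this sketch only forwards the call.
from enum import Enum

import torch


class GraphMode(Enum):  # stand-in for vLLM's CUDAGraphMode
    NONE = 0
    FULL = 1


class FullGraphWrapper(torch.nn.Module):
    """Hypothetical wrapper: capture once per shape, then replay."""

    def __init__(self, runnable: torch.nn.Module, mode: GraphMode):
        super().__init__()
        self.runnable = runnable
        self.mode = mode  # expected to be GraphMode.FULL

    def forward(self, *args, **kwargs):
        return self.runnable(*args, **kwargs)


def wrap_draft_model(model: torch.nn.Module,
                     use_full_graph: bool) -> torch.nn.Module:
    # Wrap only when full-graph mode is enabled; eager mode stays untouched.
    if use_full_graph:
        return FullGraphWrapper(model, GraphMode.FULL)
    return model
```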

NOTE:
When async_scheduling=True is set, the draft model is forced to run in
eager mode.
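
As a rough usage sketch (assuming vLLM's standard `LLM` entry point; the checkpoint names are placeholders, and the exact knob that selects full-graph capture on Ascend may differ by version):

```python
# Hypothetical example: serving with an EAGLE draft model.
from vllm import LLM

llm = LLM(
    model="meta-llama/Llama-3.1-8B-Instruct",
    speculative_config={
        "method": "eagle",
        "model": "yuhuili/EAGLE-LLaMA3.1-Instruct-8B",
        "num_speculative_tokens": 2,
    },
    # Per the note above: with async_scheduling=True the draft model would
    # fall back to eager execution, so keep it off to exercise full graph.
    async_scheduling=False,
)
print(llm.generate("Hello, my name is")[0].outputs[0].text)
```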

### Does this PR introduce _any_ user-facing change?

### How was this patch tested?

- vLLM version: v0.12.0
- vLLM main:
ad32e3e19c

---------

Signed-off-by: anon189Ty <Stari_Falcon@outlook.com>
Co-authored-by: Yizhou Liu <liu_yizhou@outlook.com>
Co-authored-by: Yizhou <136800916+yiz-liu@users.noreply.github.com>

@@ -36,7 +36,7 @@ def set_ascend_forward_context(
         aclgraph_runtime_mode: CUDAGraphMode = CUDAGraphMode.NONE,
         batch_descriptor: Optional[BatchDescriptor] = None,
         model_instance: torch.nn.Module = None,
-        is_mtp_model=False):
+        is_draft_model=False):
     """A context manager that stores the current forward context,
     can be attention metadata, etc.
     We add some additional param into forward_context.
@@ -55,7 +55,7 @@ def set_ascend_forward_context(
     from vllm_ascend.ops.fused_moe.moe_comm_method import \
         get_moe_comm_method
     moe_comm_type = select_moe_comm_method(num_tokens, vllm_config,
-                                           is_mtp_model)
+                                           is_draft_model)
     forward_context.moe_comm_type = moe_comm_type
     forward_context.moe_comm_method = get_moe_comm_method(moe_comm_type)
@@ -110,7 +110,7 @@ def set_ascend_forward_context(
         forward_context.prefetch_mlp_down_proj = False
         forward_context.prefetch_mlp_enabled = prefetch_mlp_enabled
         forward_context.model_instance = model_instance
-        forward_context.is_mtp_model = is_mtp_model
+        forward_context.is_draft_model = is_draft_model
         if num_tokens is None and attn_metadata is not None:
             num_tokens = attn_metadata.num_actual_tokens
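
The hunks above rename a flag that rides on the per-step forward context. For readers unfamiliar with the pattern, here is a self-contained sketch of it (all names are illustrative, not vllm-ascend's actual layout):

```python
# Minimal sketch of the forward-context pattern: a context manager stashes
# per-step state (e.g. is_draft_model) that downstream code reads instead
# of threading it through every call.
from contextlib import contextmanager
from dataclasses import dataclass
from typing import Iterator, Optional


@dataclass
class ForwardContext:
    is_draft_model: bool = False


_current_ctx: Optional[ForwardContext] = None


@contextmanager
def set_forward_context(is_draft_model: bool = False) -> Iterator[ForwardContext]:
    global _current_ctx
    prev, _current_ctx = _current_ctx, ForwardContext(is_draft_model)
    try:
        yield _current_ctx
    finally:
        _current_ctx = prev


def get_forward_context() -> ForwardContext:
    assert _current_ctx is not None, "no active forward context"
    return _current_ctx


# The draft (EAGLE/MTP) pass runs with the flag set; the target pass does not.
with set_forward_context(is_draft_model=True):
    assert get_forward_context().is_draft_model
```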
@@ -195,7 +195,7 @@ def get_mc2_mask():
 def select_moe_comm_method(num_tokens: int,
                            vllm_config: VllmConfig,
-                           is_mtp_model=False) -> Optional[MoECommType]:
+                           is_draft_model=False) -> Optional[MoECommType]:
     """Select the MoE communication method according to parallel settings,
     device generation, token count, and quantization.
@@ -210,7 +210,7 @@ def select_moe_comm_method(num_tokens: int,
     Args:
         num_tokens (int): The number of tokens in the current batch.
         vllm_config (VllmConfig): Runtime configuration for the model.
-        is_mtp_model (bool): Whether the model runs in MTP mode (disables fused MC2).
+        is_draft_model (bool): Whether the model runs in MTP mode (disables fused MC2).
     Raises:
         ValueError: If the soc version is unsupported.
@@ -249,13 +249,13 @@ def select_moe_comm_method(num_tokens: int,
             fused_decode_enable = fused_mc2_enable
             if envs_ascend.VLLM_ASCEND_ENABLE_FUSED_MC2 == 1:
                 fused_decode_enable = fused_mc2_enable and get_ep_group(
-                ).world_size <= 16 and (not is_mtp_model)
+                ).world_size <= 16 and (not is_draft_model)
             moe_comm_type = MoECommType.FUSED_MC2 if fused_decode_enable else MoECommType.MC2
         else:
             fused_prefill_enable = fused_mc2_enable
             if envs_ascend.VLLM_ASCEND_ENABLE_FUSED_MC2 == 1:
                 fused_prefill_enable = fused_mc2_enable and get_ep_group(
-                ).world_size <= 16 and (not is_mtp_model)
+                ).world_size <= 16 and (not is_draft_model)
             elif envs_ascend.VLLM_ASCEND_ENABLE_FUSED_MC2 == 2:
                 fused_prefill_enable = False
             moe_comm_type = MoECommType.FUSED_MC2 if fused_prefill_enable else MoECommType.ALLTOALL
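
The effect of the rename on decode-time selection can be summarized with a self-contained sketch that mirrors the predicate above (the enum, env value, and function are stand-ins for the real vllm-ascend symbols):

```python
# Mirrors the gating in the hunk above: fused MC2 stays enabled only for
# small expert-parallel groups and non-draft models.
from enum import Enum


class MoECommType(Enum):
    MC2 = "mc2"
    FUSED_MC2 = "fused_mc2"


def pick_decode_comm(fused_mc2_enable: bool, fused_mc2_env: int,
                     ep_world_size: int, is_draft_model: bool) -> MoECommType:
    fused_decode_enable = fused_mc2_enable
    if fused_mc2_env == 1:
        # Cap EP world size at 16 and exclude draft models, as in the diff.
        fused_decode_enable = (fused_mc2_enable and ep_world_size <= 16
                               and not is_draft_model)
    return MoECommType.FUSED_MC2 if fused_decode_enable else MoECommType.MC2


# The draft (EAGLE) pass falls back to plain MC2 even when fusion is on:
assert pick_decode_comm(True, 1, 8, is_draft_model=True) is MoECommType.MC2
assert pick_decode_comm(True, 1, 8, is_draft_model=False) is MoECommType.FUSED_MC2
```

The prefill branch in the hunk applies the same cap, with ALLTOALL rather than MC2 as the fallback.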