[Feature] Support full-graph mode with EAGLE (#5118)

### What this PR does / why we need it?
    
This PR adds support for running EAGLE speculative decoding in full-graph mode.

Change list:
1. Distinguish between handling `graph_params` and `draft_graph_params` in
   `attention_v1`.
2. Adapt full-graph mode in `eagle_proposer`:
    1. When full graph is enabled, wrap the draft model with the full-graph
       wrapper at model-load time.
    2. Build new attention metadata, set the running mode to FULL, and mark
       the attention update in `dummy_run` when in full-graph mode.
    3. Fix and fill attention metadata fields such as
       `attn_metadata.slot_mapping`.
    4. Add a descriptor.
    5. Set the running mode and trigger the metadata update.
3. Rename `is_mtp_model` to `is_draft_model`, and add the workspace update.

NOTE:
When `async_scheduling=True` is set, the draft model is forced to execute
in eager mode.
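
For context, here is a hypothetical launch sketch showing how EAGLE and full-graph capture are typically enabled. The argument names (`speculative_config`, `compilation_config`, `cudagraph_mode`) come from upstream vLLM, and the model names are placeholders; treat this as an untested assumption, not part of this PR.

```python
# Hypothetical launch sketch (not from this PR): argument names follow
# upstream vLLM's LLM() API; model names are placeholders.
from vllm import LLM

llm = LLM(
    model="meta-llama/Llama-3.1-8B-Instruct",           # placeholder target model
    speculative_config={
        "method": "eagle",
        "model": "yuhuili/EAGLE-LLaMA3.1-Instruct-8B",  # placeholder EAGLE draft head
        "num_speculative_tokens": 2,
    },
    compilation_config={"cudagraph_mode": "FULL"},      # request full-graph capture
    # Per the NOTE above, async_scheduling=True would force the draft
    # model back to eager mode.
)
```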

### Does this PR introduce _any_ user-facing change?

### How was this patch tested?

- vLLM version: v0.12.0
- vLLM main: ad32e3e19c

---------

Signed-off-by: anon189Ty <Stari_Falcon@outlook.com>
Co-authored-by: Yizhou Liu <liu_yizhou@outlook.com>
Co-authored-by: Yizhou <136800916+yiz-liu@users.noreply.github.com>
Author: anon189Ty
Date: 2025-12-29 09:54:51 +08:00 (committed by GitHub)
Parent: f81cf694b2
Commit: 3e67e8276c

11 changed files with 348 additions and 103 deletions


@@ -38,8 +38,9 @@ from vllm_ascend.attention.utils import (AscendCommonAttentionMetadata,
                                          AscendMetadataForPrefill, enable_cp,
                                          split_decodes_and_prefills,
                                          using_paged_attention)
-from vllm_ascend.compilation.acl_graph import (get_graph_params,
-                                               update_graph_params_workspaces)
+from vllm_ascend.compilation.acl_graph import (
+    get_draft_graph_params, get_graph_params,
+    update_draft_graph_params_workspaces, update_graph_params_workspaces)
 from vllm_ascend.utils import (AscendDeviceType, get_ascend_device_type,
                                weak_ref_tensors)
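
The import change above pulls in the new draft-side accessors. A minimal sketch of how the two registries could be organized follows; only the four function names are confirmed by this diff, and the internal layout (a per-`num_tokens` workspace dict) is an assumption for illustration.

```python
# Minimal sketch, assuming each registry keeps workspaces keyed by token
# count; only the four function names below appear in the actual module.
from dataclasses import dataclass, field
from typing import Any, Dict


@dataclass
class GraphParams:
    workspaces: Dict[int, Any] = field(default_factory=dict)


_graph_params = GraphParams()        # target (main) model graphs
_draft_graph_params = GraphParams()  # draft (EAGLE/MTP) model graphs


def get_graph_params() -> GraphParams:
    return _graph_params


def get_draft_graph_params() -> GraphParams:
    return _draft_graph_params


def update_graph_params_workspaces(num_tokens: int, workspace: Any) -> None:
    _graph_params.workspaces[num_tokens] = workspace


def update_draft_graph_params_workspaces(num_tokens: int,
                                         workspace: Any) -> None:
    _draft_graph_params.workspaces[num_tokens] = workspace
```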
@@ -262,7 +263,9 @@ class AscendAttentionMetadataBuilder(AttentionMetadataBuilder[AscendMetadata]):
         common_attn_metadata: AscendCommonAttentionMetadata,
         attn_state: AscendAttentionState = AscendAttentionState.DecodeOnly,
     ):
-        if attn_state == AscendAttentionState.DecodeOnly:
+        if attn_state in (AscendAttentionState.DecodeOnly,
+                          AscendAttentionState.ChunkedPrefill):
             attn_metadata = self.build(
                 common_prefix_len=0,
                 common_attn_metadata=common_attn_metadata,
@@ -319,7 +322,11 @@ class AscendAttentionBackendImpl(AttentionImpl):
             = self._get_fia_params(key, value, attn_metadata)
         num_tokens = attn_metadata.actual_seq_lengths_q[-1]
-        graph_params = get_graph_params()
+        forward_context = get_forward_context()
+        if forward_context.is_draft_model:
+            graph_params = get_draft_graph_params()
+        else:
+            graph_params = get_graph_params()
         actual_seq_lengths_q = attn_metadata.actual_seq_lengths_q
         # Prepare tensors for attention output
         # TODO: Refactor this to step-level instead of layer-level
@@ -343,7 +350,10 @@ class AscendAttentionBackendImpl(AttentionImpl):
                 sparse_mode=3,
                 scale=self.scale,
             )
-            update_graph_params_workspaces(num_tokens, workspace)
+            if forward_context.is_draft_model:
+                update_draft_graph_params_workspaces(num_tokens, workspace)
+            else:
+                update_graph_params_workspaces(num_tokens, workspace)
         # Handle graph capturing mode
         stream = torch_npu.npu.current_stream()
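
Both call sites above repeat the same branch on `forward_context.is_draft_model`. Condensed, the pattern is the following; a helper like this is illustrative only, since the PR inlines the branch at each site.

```python
# Illustrative helper, not part of the PR: pick the registry that matches
# the model currently executing under the forward context. The two getters
# are real imports from this diff.
from vllm_ascend.compilation.acl_graph import (get_draft_graph_params,
                                               get_graph_params)


def graph_params_for(forward_context):
    if forward_context.is_draft_model:
        return get_draft_graph_params()
    return get_graph_params()
```

Keeping separate registries means a draft-model capture at some `num_tokens` bucket can no longer overwrite the main model's workspace for the same bucket.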


@@ -26,8 +26,8 @@ from vllm_ascend.attention.mla_v1 import (AscendMLADecodeMetadata,
 from vllm_ascend.attention.utils import (AscendCommonAttentionMetadata)
 from vllm_ascend.attention.common_cp import (AscendPCPMetadata,
                                              CPChunkedContextMetadata)
-from vllm_ascend.compilation.acl_graph import (get_graph_params,
-                                               get_mtp_graph_params,
+from vllm_ascend.compilation.acl_graph import (get_draft_graph_params,
+                                               get_graph_params,
                                                update_graph_params_workspaces)
 from vllm_ascend.utils import weak_ref_tensors
@@ -555,8 +555,8 @@ class AscendMlaCPImpl(AscendMLAImpl):
             "calc_type": "calc_type_ring",
         }
         forward_context: ForwardContext = get_forward_context()
-        if forward_context.is_mtp_model:
-            graph_params = get_mtp_graph_params()
+        if forward_context.is_draft_model:
+            graph_params = get_draft_graph_params()
         else:
             graph_params = get_graph_params()
         if forward_context.capturing:
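
The rename from `is_mtp_model` to `is_draft_model` generalizes the flag from MTP-only to any draft model, including EAGLE. A sketch of the flag on the forward context follows; the surrounding dataclass shape is assumed for illustration.

```python
# Assumed shape for illustration; only the field names `is_draft_model`
# (renamed from `is_mtp_model`) and `capturing` are taken from this diff.
from dataclasses import dataclass


@dataclass
class ForwardContext:
    is_draft_model: bool = False  # True while the draft (EAGLE/MTP) model runs
    capturing: bool = False       # True while an ACL graph is being captured
```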


@@ -27,9 +27,9 @@ from vllm_ascend.attention.utils import (AscendCommonAttentionMetadata,
                                          split_decodes_and_prefills,
                                          trans_rope_weight, transdata,
                                          wait_for_kv_layer_from_connector)
-from vllm_ascend.compilation.acl_graph import (get_graph_params,
-                                               get_mtp_graph_params,
-                                               update_graph_params_workspaces)
+from vllm_ascend.compilation.acl_graph import (
+    get_draft_graph_params, get_graph_params,
+    update_draft_graph_params_workspaces, update_graph_params_workspaces)
 from vllm_ascend.ops.rotary_embedding import get_cos_and_sin_mla
 from vllm_ascend.ops.shared_weight_layer import (
     is_hidden_layer, post_process_after_loading_for_shared_weight_series,
@@ -1184,8 +1184,8 @@ class AscendMLAImpl(MLAAttentionImpl):
             "actual_seq_lengths_kv": decode_meta.seq_lens_list,
         }
         forward_context: ForwardContext = get_forward_context()
-        if forward_context.is_mtp_model:
-            graph_params = get_mtp_graph_params()
+        if forward_context.is_draft_model:
+            graph_params = get_draft_graph_params()
         else:
             graph_params = get_graph_params()
         if forward_context.capturing:
@@ -1200,7 +1200,10 @@ class AscendMLAImpl(MLAAttentionImpl):
         if workspace is None:
             workspace = torch_npu._npu_fused_infer_attention_score_get_max_workspace(
                 q_nope, k_nope, k_nope, **common_kwargs)
-            update_graph_params_workspaces(num_tokens, workspace)
+            if forward_context.is_draft_model:
+                update_draft_graph_params_workspaces(num_tokens, workspace)
+            else:
+                update_graph_params_workspaces(num_tokens, workspace)
         attn_output = torch.empty(
             (q_nope.shape[1], q_nope.shape[0], *q_nope.shape[2:]),
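
The hunk above computes the fused-infer-attention max workspace once per `num_tokens` bucket and stores it in whichever registry matches the running model. The sketch below restates that lazy-caching flow as a helper, reusing the assumed `GraphParams.workspaces` layout from the earlier sketch; `compute_max_workspace` is a placeholder for the `torch_npu` call, and the helper itself is not in the PR.

```python
# Illustrative helper: lazy per-bucket workspace caching. `compute_max_workspace`
# stands in for torch_npu._npu_fused_infer_attention_score_get_max_workspace,
# and `.workspaces` is the assumed dict layout from the earlier sketch.
from vllm_ascend.compilation.acl_graph import (
    get_draft_graph_params, get_graph_params,
    update_draft_graph_params_workspaces, update_graph_params_workspaces)


def ensure_workspace(forward_context, num_tokens, compute_max_workspace):
    params = (get_draft_graph_params() if forward_context.is_draft_model
              else get_graph_params())
    workspace = params.workspaces.get(num_tokens)
    if workspace is None:
        # First capture for this bucket: compute once, then record it in
        # the registry that matches the running model.
        workspace = compute_max_workspace()
        if forward_context.is_draft_model:
            update_draft_graph_params_workspaces(num_tokens, workspace)
        else:
            update_graph_params_workspaces(num_tokens, workspace)
    return workspace
```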