[Feature] support aclgraph for model runner v2 (#7110)
### What this PR does / why we need it?
This PR aims to support aclgraph for model runner v2; please see RFC
#5208. The PR contains these modifications:
- adapt to the newest commit of the vLLM main branch.
- provide a unified interface for extra forward context, shared by model runner v1 and model runner v2 (a sketch follows this list).
- implement graph mode for the main model.
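
As a rough sketch of that unified interface: the diff below reads fields such as `is_draft_model`, `capturing`, and `num_tokens` from a module-level `_EXTRA_CTX` object instead of pulling them off the vLLM `ForwardContext`. Only those three field names appear in the diff; the class shape, defaults, and setter below are hypothetical illustration, not the actual vllm-ascend implementation:

```python
# Hypothetical sketch of a unified extra forward context. Only the field
# names is_draft_model, capturing and num_tokens are taken from the diff;
# everything else here is illustrative.
from dataclasses import dataclass


@dataclass
class ExtraForwardContext:
    # True while the MTP draft model runs, False for the main model.
    is_draft_model: bool = False
    # True while an aclgraph is being captured, False during replay/eager.
    capturing: bool = False
    # Token count after graph padding; graph-sized buffers must use this,
    # not the number of actual tokens.
    num_tokens: int = 0


# Module-level singleton so model runner v1 and v2 can both read the same
# state without threading a ForwardContext through every call site.
_EXTRA_CTX = ExtraForwardContext()


def set_extra_ctx(is_draft_model: bool, capturing: bool, num_tokens: int) -> None:
    """Update the shared context before each forward pass (hypothetical helper)."""
    _EXTRA_CTX.is_draft_model = is_draft_model
    _EXTRA_CTX.capturing = capturing
    _EXTRA_CTX.num_tokens = num_tokens
```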
### Does this PR introduce _any_ user-facing change?
No.
### How was this patch tested?
- vLLM version: v0.16.0
- vLLM main: 4034c3d32e
---------
Signed-off-by: Ronald1995 <ronaldautomobile@163.com>
@@ -6,16 +6,20 @@ import torch
 import torch_npu
 import vllm.envs as envs_vllm
 from vllm.config import VllmConfig, get_current_vllm_config
 from vllm.forward_context import ForwardContext, get_forward_context
 from vllm.logger import logger
 from vllm.model_executor.layers.attention.mla_attention import MLACommonMetadataBuilder
 from vllm.model_executor.layers.linear import UnquantizedLinearMethod
 from vllm.utils.math_utils import cdiv, round_down
-from vllm.v1.attention.backend import AttentionBackend, AttentionCGSupport, MLAAttentionImpl  # type: ignore
+from vllm.v1.attention.backend import (
+    AttentionBackend,  # type: ignore
+    AttentionCGSupport,
+    MLAAttentionImpl,
+)
 from vllm.v1.attention.backends.utils import PAD_SLOT_ID  # type: ignore
 from vllm.v1.kv_cache_interface import AttentionSpec, MLAAttentionSpec

 from vllm_ascend.ascend_config import get_ascend_config
+from vllm_ascend.ascend_forward_context import _EXTRA_CTX
 from vllm_ascend.attention.attention_mask import AttentionMaskBuilder
 from vllm_ascend.attention.attention_v1 import AscendAttentionState
 from vllm_ascend.attention.context_parallel.common_cp import AscendPCPMetadata, CPChunkedContextMetadata
@@ -44,12 +48,7 @@ from vllm_ascend.ops.layer_shard_linear import (
 )
 from vllm_ascend.ops.rotary_embedding import get_cos_and_sin_mla
 from vllm_ascend.quantization.methods import AscendW8A8LinearMethod
-from vllm_ascend.utils import (
-    ACL_FORMAT_FRACTAL_ND,
-    get_weight_prefetch_method,
-    maybe_trans_nz,
-    weak_ref_tensors,
-)
+from vllm_ascend.utils import ACL_FORMAT_FRACTAL_ND, get_weight_prefetch_method, maybe_trans_nz, weak_ref_tensors
 from vllm_ascend.worker.npu_input_batch import NPUInputBatch

 if TYPE_CHECKING:
@@ -737,7 +736,7 @@ class AscendMLAImpl(MLAAttentionImpl):
         num_dcp_pcp_tokens=None,
         draft_attn_metadatas=None,
     ):
-        if forward_context.is_draft_model:
+        if _EXTRA_CTX.is_draft_model:
            graph_params = get_draft_graph_params()
        else:
            graph_params = get_graph_params()
@@ -769,12 +768,12 @@ class AscendMLAImpl(MLAAttentionImpl):
                 softmax_lse,
             ) = param
             seq_lens_list = forward_context.attn_metadata[key].decode.seq_lens_list
-            if speculative_config and speculative_config.method == "mtp" and not forward_context.is_draft_model:
+            if speculative_config and speculative_config.method == "mtp" and not _EXTRA_CTX.is_draft_model:
                 actual_seq_lengths = forward_context.attn_metadata[key].decode.actual_seq_lengths_q
                 spec_multiple = speculative_config.num_speculative_tokens + 1
                 seq_lens_list = seq_lens_list + [0] * (num_tokens // spec_multiple - len(seq_lens_list))
                 actual_seq_lengths = [spec_multiple * (i + 1) for i in range(num_tokens // spec_multiple)]
-            elif forward_context.is_draft_model:
+            elif _EXTRA_CTX.is_draft_model:
                 actual_seq_lengths = forward_context.attn_metadata[key].decode.actual_seq_lengths_q
                 block_table = forward_context.attn_metadata[key].decode.block_table
                 # TODO: This is a hack and should be fixed in the future.
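
To make the padding arithmetic in the `mtp` branch above concrete, here is a small worked example. The numbers are illustrative, not taken from the PR; `num_tokens` is the graph-padded token count and `spec_multiple` bundles one verified token plus the speculative tokens per request:

```python
# Illustrative numbers: 2 speculative tokens, a graph padded to 16 tokens,
# and 3 real decode requests.
num_speculative_tokens = 2
spec_multiple = num_speculative_tokens + 1   # 3 tokens per request slot
num_tokens = 16                              # padded token count
seq_lens_list = [37, 12, 95]                 # one KV length per real request

# Pad seq_lens_list with zeros up to the number of request slots the
# padded graph can hold (16 // 3 = 5 slots).
num_slots = num_tokens // spec_multiple
seq_lens_list = seq_lens_list + [0] * (num_slots - len(seq_lens_list))
assert seq_lens_list == [37, 12, 95, 0, 0]

# Cumulative query lengths: slot i ends at token spec_multiple * (i + 1).
actual_seq_lengths = [spec_multiple * (i + 1) for i in range(num_slots)]
assert actual_seq_lengths == [3, 6, 9, 12, 15]
```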
@@ -1243,12 +1242,11 @@ class AscendMLAImpl(MLAAttentionImpl):
             "actual_seq_lengths": actual_seq_lengths,
             "actual_seq_lengths_kv": decode_meta.seq_lens_list,
         }
-        forward_context: ForwardContext = get_forward_context()
-        if forward_context.is_draft_model:
+        if _EXTRA_CTX.is_draft_model:
             graph_params = get_draft_graph_params()
         else:
             graph_params = get_graph_params()
-        if forward_context.capturing:
+        if _EXTRA_CTX.capturing:
             stream = torch_npu.npu.current_stream()

             event = torch.npu.ExternalEvent()
@@ -1261,7 +1259,7 @@ class AscendMLAImpl(MLAAttentionImpl):
             workspace = torch_npu._npu_fused_infer_attention_score_get_max_workspace(
                 q_nope, k_nope, k_nope, **common_kwargs
             )
-            if forward_context.is_draft_model:
+            if _EXTRA_CTX.is_draft_model:
                 update_draft_graph_params_workspaces(num_tokens, workspace)
             else:
                 update_graph_params_workspaces(num_tokens, workspace)
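
The capture path above queries the attention operator's maximum workspace once and registers it via `update_graph_params_workspaces` / `update_draft_graph_params_workspaces`, so a captured graph can replay without reallocating. A minimal sketch of what such registries could look like, assuming one cached workspace tensor per padded token count; the dict layout and keep-the-largest policy are assumptions, and only the two call names and the `(num_tokens, workspace)` signature come from the diff:

```python
# Hypothetical per-token-count workspace registries; the real vllm_ascend
# implementation may differ.
import torch

_WORKSPACES: dict[int, torch.Tensor] = {}        # main-model graphs
_DRAFT_WORKSPACES: dict[int, torch.Tensor] = {}  # MTP draft-model graphs


def update_graph_params_workspaces(num_tokens: int, workspace: torch.Tensor) -> None:
    # Keep the largest workspace seen for this padded token count so the
    # captured graph always has enough scratch memory at replay time.
    cached = _WORKSPACES.get(num_tokens)
    if cached is None or workspace.numel() > cached.numel():
        _WORKSPACES[num_tokens] = workspace


def update_draft_graph_params_workspaces(num_tokens: int, workspace: torch.Tensor) -> None:
    cached = _DRAFT_WORKSPACES.get(num_tokens)
    if cached is None or workspace.numel() > cached.numel():
        _DRAFT_WORKSPACES[num_tokens] = workspace
```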
@@ -1493,7 +1491,6 @@ class AscendMLAImpl(MLAAttentionImpl):
             reach_layer_for_shard_weight_series(layer)
             return output.fill_(0)

-        forward_context = get_forward_context()
         num_actual_tokens = self.get_num_actual_tokens(attn_metadata)
         assert (
             attn_metadata.num_decodes is not None
@@ -1505,7 +1502,7 @@ class AscendMLAImpl(MLAAttentionImpl):
         num_decode_tokens = attn_metadata.num_decode_tokens
         # Inputs and outputs may be padded for CUDA graphs
         output_padded = output
-        o_proj_input_shape = (forward_context.num_tokens, self.num_heads * self.v_head_dim)
+        o_proj_input_shape = (_EXTRA_CTX.num_tokens, self.num_heads * self.v_head_dim)
         o_proj_input = torch.empty(o_proj_input_shape, dtype=hidden_states.dtype, device=hidden_states.device)

         # MLA Preprocess
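
The `o_proj_input` buffer above is sized with the padded `num_tokens` from the extra context rather than `num_actual_tokens`, because a captured graph always replays with the fixed shapes it was captured with. A toy illustration of the same allocate-padded, use-actual pattern; all names and numbers here are illustrative:

```python
import torch

num_actual_tokens = 13   # real tokens this step (illustrative)
padded_num_tokens = 16   # fixed size the graph was captured with
num_heads, v_head_dim = 8, 128

# Allocate at the padded size so the shape matches the captured graph...
o_proj_input = torch.empty(padded_num_tokens, num_heads * v_head_dim)

# ...but only the first num_actual_tokens rows carry real data.
valid = o_proj_input[:num_actual_tokens]
assert valid.shape == (13, 1024)
```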