[Feature] support aclgraph for model runner v2 (#7110)
### What this PR does / why we need it?
This PR adds aclgraph support for model runner v2; see RFC
#5208. It contains these modifications:
- adapt to the newest commit of the vLLM main branch.
- provide a unified extra-forward-context interface shared by model runner v1 and model runner v2 (a sketch follows this list).
- implement graph mode for the main model.
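
A minimal sketch of what such a unified context could look like. Only the `_EXTRA_CTX` name and its `moe_comm_method` attribute come from the diff below; `ExtraForwardContext` and `set_extra_forward_context` are hypothetical names used purely for illustration:

```python
# Sketch of a unified extra-forward-context, assuming a module-level
# object that both model runners populate before each forward pass.
# Only _EXTRA_CTX and moe_comm_method are taken from the diff; the
# rest is an illustrative assumption, not vllm-ascend's actual API.
from dataclasses import dataclass
from typing import Any, Optional


@dataclass
class ExtraForwardContext:
    # MoE communication strategy for the current step; ops read this
    # instead of calling vllm.forward_context.get_forward_context().
    moe_comm_method: Optional[Any] = None


# Module-level singleton, matching the _EXTRA_CTX name in the diff.
_EXTRA_CTX = ExtraForwardContext()


def set_extra_forward_context(moe_comm_method: Any) -> None:
    # Either model runner calls this at the start of a step. Mutating
    # one long-lived object keeps reads like _EXTRA_CTX.moe_comm_method
    # valid both during graph capture and at replay time.
    _EXTRA_CTX.moe_comm_method = moe_comm_method
```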
### Does this PR introduce _any_ user-facing change?
No.
### How was this patch tested?
- vLLM version: v0.16.0
- vLLM main: 4034c3d32e
---------
Signed-off-by: Ronald1995 <ronaldautomobile@163.com>
```diff
@@ -18,12 +18,11 @@ from collections.abc import Callable

 import torch
 from vllm.distributed import get_dp_group, get_ep_group, get_tp_group
-from vllm.forward_context import get_forward_context
 from vllm.model_executor.layers.fused_moe.config import FusedMoEConfig
 from vllm.model_executor.layers.fused_moe.layer import FusedMoE, UnquantizedFusedMoEMethod
 from vllm.model_executor.layers.fused_moe.shared_fused_moe import SharedFusedMoE

-from vllm_ascend.ascend_forward_context import MoECommType
+from vllm_ascend.ascend_forward_context import _EXTRA_CTX, MoECommType
 from vllm_ascend.ops.fused_moe.experts_selector import zero_experts_compute
 from vllm_ascend.ops.fused_moe.moe_comm_method import FusedExpertsResult, _MoECommMethods
 from vllm_ascend.quantization.methods.base import QuantType
@@ -93,7 +92,7 @@ class AscendUnquantizedFusedMoEMethod310(UnquantizedFusedMoEMethod):

         topk_weights = topk_weights.to(x.dtype)

-        moe_comm_method = get_forward_context().moe_comm_method
+        moe_comm_method = _EXTRA_CTX.moe_comm_method
         final_hidden_states = moe_comm_method.fused_experts(
             hidden_states=x,
             w1=layer.w13_weight,
@@ -222,9 +221,8 @@ class AscendFusedMoE310(FusedMoE):
     ) -> torch.Tensor:
         assert self.quant_method is not None
         assert self.routed_scaling_factor == 1.0, "routed_scaling_factor != 1.0 is not supported."
-        forward_context = get_forward_context()

-        hidden_states, router_logits, _, context_metadata = forward_context.moe_comm_method.prepare(
+        hidden_states, router_logits, _, context_metadata = _EXTRA_CTX.moe_comm_method.prepare(
             hidden_states=hidden_states, router_logits=router_logits, quant_type=self.quant_type
         )

@@ -246,7 +244,7 @@ class AscendFusedMoE310(FusedMoE):
             apply_router_weight_on_input=self.apply_router_weight_on_input,
         )

-        routed_out = forward_context.moe_comm_method.finalize(
+        routed_out = _EXTRA_CTX.moe_comm_method.finalize(
             hidden_states=fused_experts_results.routed_out,
             reduce_results=self.reduce_results,
             context_metadata=context_metadata,
```
```diff
@@ -16,8 +16,8 @@
 from __future__ import annotations

 import torch
-from vllm.forward_context import get_forward_context

+from vllm_ascend.ascend_forward_context import _EXTRA_CTX
 from vllm_ascend.ops.fused_moe.moe_comm_method import AllGatherCommImpl, FusedExpertsResult

 from .moe_mlp import unified_apply_mlp
@@ -50,7 +50,7 @@ class AllGatherCommImpl310(AllGatherCommImpl):
     ) -> FusedExpertsResult:
         # This method is overridden to use the 310p-specific unified_apply_mlp
         # which provides optimized MLP computation for the 310p platform
-        moe_comm_method = get_forward_context().moe_comm_method
+        moe_comm_method = _EXTRA_CTX.moe_comm_method
         assert moe_comm_method is not None, "Missing communication context"

         dispatch_results = self.token_dispatcher.token_dispatch(
```
```diff
@@ -21,9 +21,9 @@ from typing import Any
 import torch
 from vllm.config import get_current_vllm_config
 from vllm.distributed import get_ep_group
-from vllm.forward_context import get_forward_context

 from vllm_ascend._310p.fused_moe.experts_selector import select_experts
+from vllm_ascend.ascend_forward_context import _EXTRA_CTX
 from vllm_ascend.ops.fused_moe.experts_selector import zero_experts_compute
 from vllm_ascend.quantization.methods.base import AscendMoEScheme, QuantType

@@ -125,7 +125,7 @@ class AscendW8A8DynamicFusedMoEMethod310(AscendMoEScheme):

         topk_weights = topk_weights.to(self.in_dtype)

-        moe_comm_method = get_forward_context().moe_comm_method
+        moe_comm_method = _EXTRA_CTX.moe_comm_method

         final_hidden_states = moe_comm_method.fused_experts(
             hidden_states=x,
```
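
The same substitution appears in the unquantized method, the `AscendFusedMoE310` layer, the all-gather communication implementation, and the W8A8 dynamic quantization method: each read of `get_forward_context().moe_comm_method` (or of a cached `forward_context` local) becomes a read of the module-level `_EXTRA_CTX`. A plausible reading of the design choice, inferred rather than stated in the commit: graph capture wants the operators' state handles to be stable across replays, and a single long-lived context object that the model runner mutates in place gives graph-resident code a fixed place to look, independent of vLLM's per-step forward-context plumbing.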