Small refactor DeepEPMode to clean up code a bit (#4992)
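This refactor replaces the stringly-typed deepep_mode ("normal" / "low_latency" / "auto") with a DeepEPMode enum imported from sglang.srt.utils. The enum definition itself is outside this diff; what follows is a minimal sketch of what it plausibly looks like, reconstructed from the call sites below. The member names appear in the diff, but the method bodies are inferred rather than copied from the source:

    from enum import Enum


    class DeepEPMode(Enum):
        normal = "normal"
        low_latency = "low_latency"
        auto = "auto"

        def enable_normal(self) -> bool:
            # Inferred: contiguous ("normal") dispatch is used by normal and auto.
            return self in (DeepEPMode.normal, DeepEPMode.auto)

        def enable_low_latency(self) -> bool:
            # Inferred: masked low-latency dispatch is used by low_latency and auto.
            return self in (DeepEPMode.low_latency, DeepEPMode.auto)

        def resolve(self, forward_mode) -> "DeepEPMode":
            # Inferred: "auto" is decided per batch, matching the conditionals
            # this commit deletes: low_latency while decoding, normal otherwise.
            if self != DeepEPMode.auto:
                return self
            if forward_mode.is_decode():
                return DeepEPMode.low_latency
            return DeepEPMode.normal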
@@ -38,7 +38,7 @@ from sglang.srt.layers.quantization.base_config import (
 )
 from sglang.srt.layers.quantization.fp8 import Fp8Config, Fp8MoEMethod
 from sglang.srt.model_executor.forward_batch_info import ForwardMode
-from sglang.srt.utils import is_cuda, is_hip, set_weight_attrs
+from sglang.srt.utils import DeepEPMode, is_cuda, is_hip, set_weight_attrs
 
 _is_cuda = is_cuda()
 
@@ -47,7 +47,6 @@ if _is_cuda:
 else:
     from vllm import _custom_ops as vllm_ops
 
-
 logger = logging.getLogger(__name__)
 
 _is_hip = is_hip()
@@ -814,7 +813,7 @@ class DeepEPMoE(EPMoE):
         correction_bias: Optional[torch.Tensor] = None,
         custom_routing_function: Optional[Callable] = None,
         activation: str = "silu",
-        deepep_mode: str = "auto",
+        deepep_mode: DeepEPMode = DeepEPMode.auto,
     ):
         super().__init__(
             num_experts,
@@ -834,7 +833,7 @@ class DeepEPMoE(EPMoE):
             activation,
         )
         self.deepep_mode = deepep_mode
-        if self.deepep_mode in ["low_latency", "auto"]:
+        if self.deepep_mode.enable_low_latency():
             assert use_deep_gemm, f"DeepEP {self.deepep_mode} mode requires deep_gemm"
             self.w13_weight_fp8 = (
                 self.w13_weight,
@@ -858,13 +857,10 @@ class DeepEPMoE(EPMoE):
         expected_m: int,
         forward_mode: ForwardMode,
     ):
-        if self.deepep_mode == "normal" or (
-            self.deepep_mode == "auto" and not forward_mode.is_decode()
-        ):
+        resolved_deepep_mode = self.deepep_mode.resolve(forward_mode)
+        if resolved_deepep_mode == DeepEPMode.normal:
             return self.forward_normal(hidden_states, reorder_topk_ids, seg_indptr)
-        elif self.deepep_mode == "low_latency" or (
-            self.deepep_mode == "auto" and forward_mode.is_decode()
-        ):
+        elif resolved_deepep_mode == DeepEPMode.low_latency:
             return self.forward_deepgemm_masked(hidden_states, masked_m, expected_m)
         else:
             raise ValueError(f"Invalid deepep_mode: {self.deepep_mode}")

@@ -1,3 +1,5 @@
+from sglang.srt.utils import DeepEPMode
+
 try:
     from deep_ep import Buffer
@@ -98,7 +100,7 @@ class DeepEPDispatcher:
         num_local_experts: int = None,
         hidden_size: int = None,
         params_dtype: torch.dtype = None,
-        deepep_mode: str = "auto",
+        deepep_mode: DeepEPMode = DeepEPMode.auto,
         async_finish: bool = False,
         return_recv_hook: bool = False,
     ):
@@ -120,13 +122,13 @@ class DeepEPDispatcher:
         self.deepep_mode = deepep_mode
         self.handle = None
 
-        if self.deepep_mode in ["normal", "auto"]:  # for normal / auto mode
+        if self.deepep_mode.enable_normal():
             self.buffer_normal = get_buffer_normal(
                 self.group, self.hidden_size * self.params_bytes
             )
             self.async_finish = async_finish
             self.src2dst = None
-        if self.deepep_mode in ["low_latency", "auto"]:  # for low_latency / auto mode
+        if self.deepep_mode.enable_low_latency():
             """
             num_max_dispatch_tokens_per_rank: the actual batch size in the decoding engine should be less than 256
             https://github.com/deepseek-ai/DeepEP?tab=readme-ov-file#example-use-in-inference-decoding
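The two init-time checks above gate buffer allocation by capability rather than by list membership; auto allocates both buffers, exactly as the old in ["normal", "auto"] / in ["low_latency", "auto"] tests did. Assuming the sketch of DeepEPMode given under the commit title, the gating works out to:

    mode          enable_normal()   enable_low_latency()
    normal        True              False
    low_latency   False             True
    auto          True              True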
@@ -196,9 +198,8 @@ class DeepEPDispatcher:
         )
         expected_m = 0
 
-        if self.deepep_mode == "normal" or (
-            self.deepep_mode == "auto" and not forward_mode.is_decode()
-        ):
+        resolved_deepep_mode = self.deepep_mode.resolve(forward_mode)
+        if resolved_deepep_mode == DeepEPMode.normal:
             (
                 hidden_states,
                 topk_idx,
@@ -210,9 +211,7 @@ class DeepEPDispatcher:
             reorder_topk_ids, seg_indptr, hidden_states = self.deepep_permute(
                 hidden_states, topk_idx, fp8_dtype=hidden_states.dtype
             )
-        elif self.deepep_mode == "low_latency" or (
-            self.deepep_mode == "auto" and forward_mode.is_decode()
-        ):
+        elif resolved_deepep_mode == DeepEPMode.low_latency:
             expected_m = (
                 hidden_states.shape[0]
                 * self.buffer_low_latency.group_size
@@ -354,9 +353,8 @@ class DeepEPDispatcher:
         topk_weights: torch.Tensor,
         forward_mode: ForwardMode,
     ) -> torch.Tensor:
-        if self.deepep_mode == "normal" or (
-            self.deepep_mode == "auto" and not forward_mode.is_decode()
-        ):
+        resolved_deepep_mode = self.deepep_mode.resolve(forward_mode)
+        if resolved_deepep_mode == DeepEPMode.normal:
             if hidden_states.shape[0] > 0:
                 num_tokens = self.src2dst.shape[0] // self.router_topk
                 output = torch.empty(
@@ -384,9 +382,7 @@ class DeepEPDispatcher:
                 output,
             )
             event.current_stream_wait() if self.async_finish else ()
-        elif self.deepep_mode == "low_latency" or (
-            self.deepep_mode == "auto" and forward_mode.is_decode()
-        ):
+        elif resolved_deepep_mode == DeepEPMode.low_latency:
             hidden_states, event, hook = self.combine_low_latency(
                 hidden_states,
                 topk_idx,
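The payoff is that the "auto resolves to low_latency during decode, normal otherwise" rule now lives in one place (DeepEPMode.resolve) instead of being restated at each of the half-dozen branch points across DeepEPMoE and DeepEPDispatcher. One consequence for callers not shown in this diff: code that previously passed deepep_mode="auto" as a string must now hand over an enum member. A hypothetical sketch of that boundary, assuming the configuration value is still a plain string and eliding the dispatcher's other required constructor arguments:

    # Hypothetical: convert the string-valued config at the boundary.
    deepep_mode = DeepEPMode[config_deepep_mode_str]  # "auto" -> DeepEPMode.auto

    dispatcher = DeepEPDispatcher(
        group=group,  # ... other required arguments elided ...
        deepep_mode=deepep_mode,
        async_finish=True,
    )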