[Refactor] Rename n_share_experts_fusion as num_fused_shared_experts (#6735)

Cheng Wan
2025-06-03 17:48:24 -07:00
committed by GitHub
parent b6d0ce9f78
commit 8a5480528d
14 changed files with 82 additions and 93 deletions


@@ -42,7 +42,7 @@ def moe_fused_gate(
num_expert_group,
topk_group,
topk,
-n_share_experts_fusion=0,
+num_fused_shared_experts=0,
routed_scaling_factor=0,
):
# This fused kernel function is used to select topk expert in a hierarchical 2-layer fashion
@@ -51,7 +51,7 @@ def moe_fused_gate(
# the #experts is decided by the input tensor shape and we currently only support power of 2 #experts
# and #experts should be divisible by num_expert_group. #expert/num_expert_group <= 32 is limited for now.
# for non-supported case, we suggest to use the biased_grouped_topk func in sglang.srt.layers.moe.topk
-# n_share_experts_fusion: if > 0, the last expert will be replaced with a round-robin shared expert
+# num_fused_shared_experts: if > 0, the last expert will be replaced with a round-robin shared expert
# routed_scaling_factor: if > 0, the last expert will be scaled by this factor
return torch.ops.sgl_kernel.moe_fused_gate.default(
input_tensor,
@@ -59,7 +59,7 @@ def moe_fused_gate(
num_expert_group,
topk_group,
topk,
-n_share_experts_fusion,
+num_fused_shared_experts,
routed_scaling_factor,
)
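
Below is a minimal call-site sketch showing the renamed keyword argument in use. Only the parameters visible in the hunks above are taken from the diff; the sgl_kernel import path, the bias argument between the router logits and num_expert_group, the example shapes/dtypes, and the availability of a CUDA build of sgl_kernel are assumptions for illustration, not part of this commit.

import torch
from sgl_kernel import moe_fused_gate  # assumed import path for the wrapper above

num_tokens = 16
num_experts = 256        # power of 2, as required by the kernel comments
num_expert_group = 8     # 256 / 8 = 32 experts per group, within the <= 32 limit
topk_group = 4
topk = 8

# Router logits plus a per-expert correction bias; `bias` is an assumed leading
# parameter that is not visible in the hunks above.
scores = torch.randn(num_tokens, num_experts, dtype=torch.float32, device="cuda")
bias = torch.zeros(num_experts, dtype=torch.float32, device="cuda")

out = moe_fused_gate(
    scores,
    bias,
    num_expert_group,
    topk_group,
    topk,
    num_fused_shared_experts=1,  # renamed from n_share_experts_fusion in this commit
    routed_scaling_factor=2.5,
)

Callers that passed n_share_experts_fusion as a keyword argument need the same rename; positional call sites, like the torch.ops.sgl_kernel dispatch inside the wrapper, are unaffected.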