quick fix: add default for new kernel (#4898)

yinfan98
2025-03-30 03:31:59 +08:00
committed by GitHub
parent 45dcfc2e76
commit 8e7b31546c


@@ -41,6 +41,6 @@ def moe_fused_gate(input_tensor, bias, num_expert_group, topk_group, topk):
     # the number of experts is decided by the input tensor shape, and we currently only support a power-of-2 number of experts;
     # #experts should also be divisible by num_expert_group, and only #experts/num_expert_group <= 32 is supported for now.
     # for unsupported cases, we suggest using the biased_grouped_topk func in sglang.srt.layers.moe.topk
-    return torch.ops.sgl_kernel.moe_fused_gate(
+    return torch.ops.sgl_kernel.moe_fused_gate.default(
         input_tensor, bias, num_expert_group, topk_group, topk
     )
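
For context on the one-line fix: torch.ops.sgl_kernel.moe_fused_gate resolves to an OpOverloadPacket, while appending .default selects the concrete OpOverload. Calling the overload directly skips per-call overload resolution and is generally the more robust handle (e.g. under torch.compile graph capture). The sketch below shows the same pattern with a standalone toy op; the demo::fused_gate name and its top-k body are assumptions for illustration only, not the sgl_kernel source.

    # Minimal sketch of packet vs. overload dispatch (assumes PyTorch >= 2.4
    # for torch.library.custom_op; the demo:: namespace is hypothetical).
    import torch

    @torch.library.custom_op("demo::fused_gate", mutates_args=())
    def fused_gate(x: torch.Tensor, k: int) -> torch.Tensor:
        # Toy stand-in for a fused gating kernel: top-k values over the last dim.
        vals, _ = torch.topk(x, k, dim=-1)
        return vals

    packet = torch.ops.demo.fused_gate            # OpOverloadPacket
    overload = torch.ops.demo.fused_gate.default  # concrete OpOverload, as in this commit

    x = torch.randn(4, 8)
    assert torch.equal(packet(x, 2), overload(x, 2))  # both dispatch to the same kernel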