From 8e7b31546c724326780058c510fce3968c0b5285 Mon Sep 17 00:00:00 2001
From: yinfan98 <1106310035@qq.com>
Date: Sun, 30 Mar 2025 03:31:59 +0800
Subject: [PATCH] quick fix: add default for new kernel (#4898)

---
 sgl-kernel/python/sgl_kernel/moe.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sgl-kernel/python/sgl_kernel/moe.py b/sgl-kernel/python/sgl_kernel/moe.py
index 24066424f..1067a1760 100644
--- a/sgl-kernel/python/sgl_kernel/moe.py
+++ b/sgl-kernel/python/sgl_kernel/moe.py
@@ -41,6 +41,6 @@ def moe_fused_gate(input_tensor, bias, num_expert_group, topk_group, topk):
     # the #experts is decided by the input tensor shape and we currently only support power of 2 #experts
     # and #experts should be divisible by num_expert_group. #expert/num_expert_group <= 32 is limitted for now.
     # for non-supported case, we suggestion to use the biased_grouped_topk func in sglang.srt.layers.moe.topk
-    return torch.ops.sgl_kernel.moe_fused_gate(
+    return torch.ops.sgl_kernel.moe_fused_gate.default(
         input_tensor, bias, num_expert_group, topk_group, topk
     )
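
For context: torch.ops.sgl_kernel.moe_fused_gate is an OpOverloadPacket, while
appending .default names the concrete OpOverload behind it. Pinning the overload
skips packet-level overload resolution on each call and gives tracing machinery a
fixed schema to work with, which is presumably why the patch adds it. The sketch
below is an illustration only, not part of the patch; it demonstrates the
packet/overload distinction with a core ATen op (aten.relu) so it runs without
the sgl_kernel extension installed:

    import torch

    # torch.ops.<namespace>.<op> is an OpOverloadPacket; ".default" selects the
    # concrete OpOverload carrying the default schema. aten.relu stands in here
    # for sgl_kernel::moe_fused_gate.
    packet = torch.ops.aten.relu
    overload = torch.ops.aten.relu.default

    x = torch.randn(4)
    assert torch.equal(packet(x), overload(x))  # same kernel, same result
    print(type(packet).__name__)    # OpOverloadPacket
    print(type(overload).__name__)  # OpOverload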