quick fix: add default for new kernel (#4898)
@@ -41,6 +41,6 @@ def moe_fused_gate(input_tensor, bias, num_expert_group, topk_group, topk):
     # the #experts is decided by the input tensor shape, and we currently only support power-of-2 #experts
     # #experts should also be divisible by num_expert_group; #experts / num_expert_group <= 32 is the limit for now
     # for unsupported cases, we suggest using the biased_grouped_topk func in sglang.srt.layers.moe.topk
-    return torch.ops.sgl_kernel.moe_fused_gate(
+    return torch.ops.sgl_kernel.moe_fused_gate.default(
         input_tensor, bias, num_expert_group, topk_group, topk
     )
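The only functional change is switching the call from the op packet torch.ops.sgl_kernel.moe_fused_gate(...) to its .default overload. The sketch below is not the sgl_kernel code; it registers a toy custom op under a hypothetical "demo" namespace (the fused_gate schema and its softmax body are placeholders) purely to illustrate the packet-vs-.default calling convention in PyTorch. One common reason to pin .default is that it binds a concrete OpOverload up front instead of resolving the overload at each call; the commit message itself only says "add default for new kernel".

    # Minimal sketch, assuming a stock PyTorch install; "demo" / "fused_gate" are hypothetical.
    import torch

    # Register a toy custom op; this stands in for the real sgl_kernel registration.
    demo_lib = torch.library.Library("demo", "DEF")
    demo_lib.define("fused_gate(Tensor scores, Tensor bias) -> Tensor")

    def fused_gate_impl(scores: torch.Tensor, bias: torch.Tensor) -> torch.Tensor:
        # Placeholder math standing in for the real fused gating kernel.
        return torch.softmax(scores + bias, dim=-1)

    demo_lib.impl("fused_gate", fused_gate_impl, "CompositeExplicitAutograd")

    scores = torch.randn(4, 8)
    bias = torch.zeros(8)

    # Packet call: the overload is resolved at call time.
    out_packet = torch.ops.demo.fused_gate(scores, bias)

    # ".default" call: binds the concrete default OpOverload directly,
    # mirroring the moe_fused_gate(...) -> moe_fused_gate.default(...) change above.
    out_default = torch.ops.demo.fused_gate.default(scores, bias)

    assert torch.allclose(out_packet, out_default)

Both calls return the same result; only the way the overload is selected differs.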