Set num_fused_shared_experts as num_shared_experts when shared_experts fusion is not disabled (#6736)
@@ -68,7 +68,7 @@ __device__ void moe_fused_gate_impl(
   }

   // Calculate topk_excluding_share_expert_fusion from topk
-  int64_t topk_excluding_share_expert_fusion = topk - (num_fused_shared_experts > 0 ? 1 : 0);
+  int64_t topk_excluding_share_expert_fusion = topk - num_fused_shared_experts;

   // Cast pointers to type T:
   auto* input_ptr = reinterpret_cast<T*>(input);
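With this change, the caller's `topk` is expected to already include one slot per fused shared expert, so the number of routed slots is simply `topk - num_fused_shared_experts` (previously at most one shared slot was reserved, no matter how many shared experts were fused). A minimal Python sketch of that slot split, for illustration only (the helper name `split_topk` is made up):

    def split_topk(topk: int, num_fused_shared_experts: int):
        # Slots left for routed experts after reserving one per fused shared expert.
        routed_slots = topk - num_fused_shared_experts
        return routed_slots, num_fused_shared_experts

    # Example: topk=9 with 2 fused shared experts -> 7 routed slots + 2 shared slots.
    assert split_topk(9, 2) == (7, 2)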
@@ -224,13 +224,21 @@ __device__ void moe_fused_gate_impl(

   if (thread_group_idx == 0 && num_fused_shared_experts > 0) {
     int64_t last_idx = topk * thread_row + topk_excluding_share_expert_fusion;

-    // Use round-robin to select expert
-    int64_t expert_offset = thread_row % num_fused_shared_experts;
+    int64_t expert_offset = 0;
     indices_ptr[last_idx] = static_cast<int32_t>(params.NUM_EXPERTS + expert_offset);

     // Set the weight to the sum of all weights divided by routed_scaling_factor
     output_ptr[last_idx] = output_sum / routed_scaling_factor;
+
+    if (num_fused_shared_experts > 1) {
+      for (int i = 1; i < num_fused_shared_experts; ++i) {
+        ++last_idx;
+        ++expert_offset;
+        indices_ptr[last_idx] = static_cast<int32_t>(params.NUM_EXPERTS + expert_offset);
+        // Set the weight to the sum of all weights divided by routed_scaling_factor
+        output_ptr[last_idx] = output_sum / routed_scaling_factor;
+      }
+    }
   }
   __syncthreads();
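Per row, the tail slots now hold the fused shared experts: expert ids count up from `params.NUM_EXPERTS` (a fixed 0-based offset, replacing the earlier per-row round-robin), and every shared slot gets the same weight, `output_sum / routed_scaling_factor`. A NumPy sketch of that per-row fill, purely illustrative and assuming `output_sum` is the sum of the row's routed weights (names below are not from the kernel):

    import numpy as np

    def fill_shared_slots(indices_row, weights_row, num_experts,
                          num_fused_shared_experts, routed_scaling_factor):
        topk = indices_row.shape[0]
        routed = topk - num_fused_shared_experts
        output_sum = weights_row[:routed].sum()  # assumed: sum of the routed weights
        for i in range(num_fused_shared_experts):
            # Shared experts sit right after the routed experts in expert-id space.
            indices_row[routed + i] = num_experts + i
            weights_row[routed + i] = output_sum / routed_scaling_factor
        return indices_row, weights_row

    idx = np.zeros(8, dtype=np.int64)
    w = np.array([0.3, 0.2, 0.2, 0.1, 0.1, 0.1, 0.0, 0.0], dtype=np.float32)
    fill_shared_slots(idx, w, num_experts=16, num_fused_shared_experts=2, routed_scaling_factor=2.5)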
@@ -51,8 +51,8 @@ def moe_fused_gate(
     # the #experts is decided by the input tensor shape and we currently only support power of 2 #experts
     # and #experts should be divisible by num_expert_group. #expert/num_expert_group <= 32 is limited for now.
     # for non-supported case, we suggest to use the biased_grouped_topk func in sglang.srt.layers.moe.topk
-    # num_fused_shared_experts: if > 0, the last expert will be replaced with a round-robin shared expert
-    # routed_scaling_factor: if > 0, the last expert will be scaled by this factor
+    # num_fused_shared_experts: if > 0, the last several experts will be replaced with shared experts
+    # routed_scaling_factor: if > 0, the shared experts will be scaled by this factor
     return torch.ops.sgl_kernel.moe_fused_gate.default(
         input_tensor,
         bias,
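Because the kernel reports fused shared experts with ids starting at `num_experts` (see the CUDA change above), downstream code can separate routed from shared assignments directly from the returned indices. An illustrative helper, not part of this PR:

    import torch

    def split_routed_and_shared(indices: torch.Tensor, num_experts: int):
        shared_mask = indices >= num_experts               # tail slots written by the kernel
        shared_ids = indices[shared_mask] - num_experts    # 0-based fused shared-expert ids
        routed_ids = indices[~shared_mask]
        return routed_ids, shared_ids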
@@ -19,7 +19,7 @@ from sglang.srt.layers.moe.topk import biased_grouped_topk
         (512, 16, 8, 16),
     ],
 )
-@pytest.mark.parametrize("num_fused_shared_experts", [0, 1])
+@pytest.mark.parametrize("num_fused_shared_experts", [0, 1, 2])
 def test_moe_fused_gate_combined(seq_length, dtype, params, num_fused_shared_experts):
     num_experts, num_expert_group, topk_group, topk = params
@@ -27,7 +27,7 @@ def test_moe_fused_gate_combined(seq_length, dtype, params, num_fused_shared_experts):
     tensor = torch.rand((seq_length, num_experts)).to(dtype).cuda()
     scores = tensor.clone()
     bias = torch.rand(num_experts).to(dtype).cuda()
-    topk = topk + min(1, num_fused_shared_experts)
+    topk = topk + num_fused_shared_experts

     output, indices = moe_fused_gate(
         tensor,
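With `num_fused_shared_experts` parametrized up to 2 and `topk` widened by the same amount, one extra sanity check that follows from the kernel change (sketched here as an assumption, not necessarily what the test asserts; it presumes `output` and `indices` are shaped `[seq_length, topk]`) is that the last `num_fused_shared_experts` columns all refer to shared experts and carry identical weights:

    import torch

    def check_shared_tail(output, indices, num_experts, num_fused_shared_experts):
        if num_fused_shared_experts == 0:
            return
        tail_idx = indices[:, -num_fused_shared_experts:]
        tail_w = output[:, -num_fused_shared_experts:].float()
        assert (tail_idx >= num_experts).all()          # tail slots point at fused shared experts
        assert torch.allclose(tail_w, tail_w[:, :1].expand_as(tail_w))  # same weight in every shared slot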