[EP] Add cuda kernel for moe_ep_post_reorder (#6837)

Co-authored-by: luoyuan.luo <luoyuan.luo@antgroup.com>
This commit is contained in:
Yuan Luo
2025-06-05 15:33:47 +08:00
committed by GitHub
parent 0166403c20
commit 43baba649e
7 changed files with 377 additions and 4 deletions

View File

@@ -49,6 +49,7 @@ from sgl_kernel.gemm import (
from sgl_kernel.grammar import apply_token_bitmask_inplace_cuda
from sgl_kernel.moe import (
cutlass_fp4_group_mm,
ep_moe_post_reorder,
ep_moe_pre_reorder,
fp8_blockwise_scaled_grouped_mm,
moe_align_block_size,

View File

@@ -88,6 +88,28 @@ def ep_moe_pre_reorder(
)
def ep_moe_post_reorder(
    down_output,
    output,
    src2dst,
    topk_ids,
    topk_weights,
    start_expert_id,
    end_expert_id,
    topk,
):
    """Invoke the expert-parallel MoE post-reorder CUDA kernel.

    Thin Python wrapper: every argument is forwarded unchanged, in order,
    to the registered custom op ``sgl_kernel.ep_moe_post_reorder`` and the
    op's result is returned as-is. Presumably the kernel scatters per-expert
    ``down_output`` rows back into token order in ``output`` using
    ``src2dst``/``topk_ids``/``topk_weights`` — confirm against the CUDA
    implementation; this wrapper itself performs no computation.
    """
    # Bind the ``.default`` overload once, then dispatch.
    kernel = torch.ops.sgl_kernel.ep_moe_post_reorder.default
    return kernel(
        down_output,
        output,
        src2dst,
        topk_ids,
        topk_weights,
        start_expert_id,
        end_expert_id,
        topk,
    )
def fp8_blockwise_scaled_grouped_mm(
output,
a_ptrs,