fix ep_moe_reorder kernel bugs (#6858)

Co-authored-by: JieXin Liang <Alcanderian@users.noreply.github.com>
Author: Xiaoyu Zhang
Committed by: GitHub
Date: 2025-06-04 19:13:59 +08:00
Parent: 180ff5eecc
Commit: bd75690f4e
3 changed files with 226 additions and 31 deletions


@@ -1,8 +1,5 @@
-import itertools
-
 import torch
 import triton
 import triton.language as tl
-
 from sgl_kernel import ep_moe_pre_reorder
 from sglang.srt.layers.moe.ep_moe.kernels import pre_reorder_triton_kernel
@@ -25,9 +22,15 @@ configs = [(bs,) for bs in batch_sizes]
     )
 )
 def benchmark(batch_size, provider):
-    dtype = torch.float32
+    dtype = torch.bfloat16
     device = torch.device("cuda")
-    hidden_size, topk, start_expert_id, end_expert_id, block_size = 4096, 8, 0, 255, 512
+    hidden_size, topk, start_expert_id, end_expert_id, block_size = (
+        4096,
+        8,
+        0,
+        255,
+        512,
+    )
 
     # Allocate fresh tensors for every run to match bench_moe_fused_gate style
     def alloc_tensors():
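Note: the body of alloc_tensors() lies outside this hunk. For readers without the full file, here is a plausible reconstruction of what it builds; the shapes and dtypes below are assumptions inferred from the kernel's argument list, not the file's actual code.

import torch

def alloc_tensors(batch_size, hidden_size=4096, topk=8,
                  start_expert_id=0, end_expert_id=255):
    # Hypothetical reconstruction of the benchmark's alloc_tensors() helper;
    # shapes are assumed from the pre-reorder kernel's arguments.
    device = torch.device("cuda")
    num_experts = end_expert_id - start_expert_id + 1
    # Source activations, one row per token.
    inp = torch.randn(batch_size, hidden_size, dtype=torch.bfloat16, device=device)
    # Destination buffer the kernel scatters into: one row per (token, expert) slot.
    gout = torch.empty(batch_size * topk, hidden_size, dtype=torch.bfloat16, device=device)
    # src2dst mapping from (token, k) slots to destination rows.
    s2d = torch.arange(batch_size * topk, dtype=torch.int32, device=device).reshape(batch_size, topk)
    # Expert assignment per (token, k) slot.
    tk_ids = torch.randint(start_expert_id, end_expert_id + 1,
                           (batch_size, topk), dtype=torch.int32, device=device)
    # Per-expert input scales applied during the reorder.
    scales = torch.rand(num_experts, dtype=torch.float32, device=device)
    return inp, gout, s2d, tk_ids, scales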
@@ -53,9 +56,9 @@ def benchmark(batch_size, provider):
     quantiles = [0.5, 0.2, 0.8]
 
     if provider == "cuda":
-        inp, gout, s2d, tk_ids, scales = alloc_tensors()
 
         def run_cuda():
+            inp, gout, s2d, tk_ids, scales = alloc_tensors()
             ep_moe_pre_reorder(
                 inp,
                 gout,
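This hunk is the core of the fix: the old code called alloc_tensors() once, outside run_cuda, so every timed iteration of triton.testing.do_bench reused the same buffers, and the kernel's in-place writes to gout carried over between runs; the pre-existing comment already promised fresh tensors per run, but the timed closure didn't deliver that. Moving the allocation inside the closure gives each iteration freshly initialized tensors and makes both providers pay the same allocation cost. A minimal sketch of the pattern, with hypothetical helper names:

import triton
import triton.testing  # ensure the testing submodule is loaded

def bench_with_fresh_buffers(op, make_args, quantiles=(0.5, 0.2, 0.8)):
    # Allocate inside the timed closure so every do_bench iteration sees
    # fresh buffers and in-place writes cannot leak between runs.
    def run():
        args = make_args()
        op(*args)

    return triton.testing.do_bench(run, quantiles=list(quantiles))

With allocation inside the closure, the reported times include tensor setup, so the numbers are best read relative to the other provider rather than as pure kernel latencies.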
@@ -71,9 +74,9 @@ def benchmark(batch_size, provider):
         ms, min_ms, max_ms = triton.testing.do_bench(run_cuda, quantiles=quantiles)
 
     elif provider == "triton":
-        inp, gout, s2d, tk_ids, scales = alloc_tensors()
 
         def run_triton():
+            inp, gout, s2d, tk_ids, scales = alloc_tensors()
             pre_reorder_triton_kernel[(batch_size,)](
                 inp.view(-1),
                 gout.view(-1),
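The hunk is truncated mid-call. A sketch of the complete launch follows, assuming the upstream pre_reorder_triton_kernel signature (flattened tensors, expert-range scalars, a constexpr BLOCK_SIZE); run_triton_once and its default values are illustrative, not taken from the diff.

from sglang.srt.layers.moe.ep_moe.kernels import pre_reorder_triton_kernel

def run_triton_once(batch_size, alloc_tensors, start_expert_id=0,
                    end_expert_id=255, topk=8, hidden_size=4096, block_size=512):
    # One program instance per token; each scatters its row into the
    # per-(token, expert) destination buffer, applying per-expert scales.
    inp, gout, s2d, tk_ids, scales = alloc_tensors()
    pre_reorder_triton_kernel[(batch_size,)](
        inp.view(-1),
        gout.view(-1),
        s2d.view(-1),
        tk_ids.view(-1),
        scales,
        start_expert_id,
        end_expert_id,
        topk,
        hidden_size,
        BLOCK_SIZE=block_size,
    )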