[chore] Remove unused ep_moe cuda kernels (#9956)

Author: hlu1
Date: 2025-09-06 01:35:50 -07:00
Committed by: GitHub
parent 039cef76aa
commit 5f1eb20484
13 changed files with 4 additions and 1110 deletions
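Note: judging only by the signatures visible in the hunk below, the deleted Python wrappers dispatched to three custom sgl_kernel CUDA ops on the expert-parallel MoE path: ep_moe_pre_reorder appears to scatter tokens into an expert-sorted layout via src2dst and topk_ids, ep_moe_silu_and_mul applies the fused activation between the gate and up projection halves, and ep_moe_post_reorder gathers the expert outputs back per token, weighted by topk_weights. As a rough illustration of the middle step only, here is a minimal pure-PyTorch sketch; the gate/up split convention is an assumption, since the CUDA kernel internals are not part of this diff:

import torch
import torch.nn.functional as F

def silu_and_mul_reference(gateup_output: torch.Tensor) -> torch.Tensor:
    # Hypothetical stand-in for the removed ep_moe_silu_and_mul kernel.
    # Assumes the fused gate/up GEMM output is split in half along the
    # last dimension, with SiLU applied to the gate half and the result
    # multiplied elementwise into the up half.
    gate, up = gateup_output.chunk(2, dim=-1)
    return F.silu(gate) * up

The fused CUDA version would avoid materializing the intermediate tensors this sketch creates, which is the usual motivation for such kernels.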


@@ -71,70 +71,6 @@ def moe_fused_gate(
     )
 
 
-def ep_moe_pre_reorder(
-    input_tensor,
-    gateup_input,
-    src2dst,
-    topk_ids,
-    a1_scales,
-    start_expert_id,
-    end_expert_id,
-    topk,
-    use_per_token_if_dynamic,
-):
-    return torch.ops.sgl_kernel.ep_moe_pre_reorder.default(
-        input_tensor,
-        gateup_input,
-        src2dst,
-        topk_ids,
-        a1_scales,
-        start_expert_id,
-        end_expert_id,
-        topk,
-        use_per_token_if_dynamic,
-    )
-
-
-def ep_moe_silu_and_mul(
-    gateup_output,
-    down_input,
-    reorder_topk_ids,
-    scales,
-    start_expert_id,
-    end_expert_id,
-):
-    return torch.ops.sgl_kernel.ep_moe_silu_and_mul.default(
-        gateup_output,
-        down_input,
-        reorder_topk_ids,
-        scales,
-        start_expert_id,
-        end_expert_id,
-    )
-
-
-def ep_moe_post_reorder(
-    down_output,
-    output,
-    src2dst,
-    topk_ids,
-    topk_weights,
-    start_expert_id,
-    end_expert_id,
-    topk,
-):
-    return torch.ops.sgl_kernel.ep_moe_post_reorder.default(
-        down_output,
-        output,
-        src2dst,
-        topk_ids,
-        topk_weights,
-        start_expert_id,
-        end_expert_id,
-        topk,
-    )
-
-
 def fp8_blockwise_scaled_grouped_mm(
     output,
     a_ptrs,