[chore] Remove unused ep_moe CUDA kernels (#9956)
This commit is contained in:
@@ -77,9 +77,6 @@ from sgl_kernel.memory import set_kv_buffer_kernel
|
||||
from sgl_kernel.moe import (
|
||||
apply_shuffle_mul_sum,
|
||||
cutlass_fp4_group_mm,
|
||||
ep_moe_post_reorder,
|
||||
ep_moe_pre_reorder,
|
||||
ep_moe_silu_and_mul,
|
||||
fp8_blockwise_scaled_grouped_mm,
|
||||
moe_align_block_size,
|
||||
moe_fused_gate,
|
||||
|
||||
@@ -71,70 +71,6 @@ def moe_fused_gate(
|
||||
)
|
||||
|
||||
|
||||
def ep_moe_pre_reorder(
    input_tensor,
    gateup_input,
    src2dst,
    topk_ids,
    a1_scales,
    start_expert_id,
    end_expert_id,
    topk,
    use_per_token_if_dynamic,
):
    """Thin Python wrapper over the ``sgl_kernel.ep_moe_pre_reorder`` custom op.

    Forwards every argument positionally to the registered torch op and
    returns whatever the op returns. All semantics (tensor shapes, scale
    handling, expert-id range) live in the kernel itself — presumably it
    scatters ``input_tensor`` rows into ``gateup_input`` via ``src2dst``
    for experts in [start_expert_id, end_expert_id]; confirm against the
    kernel source.
    """
    # Resolve the op once, then dispatch; equivalent to calling
    # torch.ops.sgl_kernel.ep_moe_pre_reorder.default(...) directly.
    kernel = torch.ops.sgl_kernel.ep_moe_pre_reorder.default
    return kernel(
        input_tensor,
        gateup_input,
        src2dst,
        topk_ids,
        a1_scales,
        start_expert_id,
        end_expert_id,
        topk,
        use_per_token_if_dynamic,
    )
|
||||
|
||||
|
||||
def ep_moe_silu_and_mul(
    gateup_output,
    down_input,
    reorder_topk_ids,
    scales,
    start_expert_id,
    end_expert_id,
):
    """Thin Python wrapper over the ``sgl_kernel.ep_moe_silu_and_mul`` custom op.

    Passes all arguments through positionally to the registered torch op
    and returns its result. The activation math (SiLU-and-mul over the
    gate/up halves of ``gateup_output`` into ``down_input``) is implemented
    by the kernel — verify exact layout expectations against the kernel
    source.
    """
    # Look up the default overload once and invoke it.
    kernel = torch.ops.sgl_kernel.ep_moe_silu_and_mul.default
    return kernel(
        gateup_output,
        down_input,
        reorder_topk_ids,
        scales,
        start_expert_id,
        end_expert_id,
    )
|
||||
|
||||
|
||||
def ep_moe_post_reorder(
    down_output,
    output,
    src2dst,
    topk_ids,
    topk_weights,
    start_expert_id,
    end_expert_id,
    topk,
):
    """Thin Python wrapper over the ``sgl_kernel.ep_moe_post_reorder`` custom op.

    Forwards all arguments positionally to the registered torch op and
    returns its result. Presumably gathers rows of ``down_output`` back
    into ``output`` (weighted by ``topk_weights``) using the ``src2dst``
    mapping for experts in the given id range — confirm against the
    kernel source.
    """
    # Single dispatch through the op's default overload.
    kernel = torch.ops.sgl_kernel.ep_moe_post_reorder.default
    return kernel(
        down_output,
        output,
        src2dst,
        topk_ids,
        topk_weights,
        start_expert_id,
        end_expert_id,
        topk,
    )
|
||||
|
||||
|
||||
def fp8_blockwise_scaled_grouped_mm(
|
||||
output,
|
||||
a_ptrs,
|
||||
|
||||
Reference in New Issue
Block a user