From e885bfdc6a4da0766213e80162410abcfe34574b Mon Sep 17 00:00:00 2001
From: Ke Bao
Date: Wed, 23 Jul 2025 14:01:47 +0800
Subject: [PATCH] Fix sgl-kernel ci test (#8284)

---
 sgl-kernel/tests/test_moe_fused_gate.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/sgl-kernel/tests/test_moe_fused_gate.py b/sgl-kernel/tests/test_moe_fused_gate.py
index 1e1b108c7..b08e0d97b 100644
--- a/sgl-kernel/tests/test_moe_fused_gate.py
+++ b/sgl-kernel/tests/test_moe_fused_gate.py
@@ -10,7 +10,6 @@ from sglang.srt.layers.moe.topk import biased_grouped_topk
     list(range(1, 10))
     + [16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536],
 )
-@pytest.mark.parametrize("dtype", [torch.float16, torch.float32, torch.bfloat16])
 @pytest.mark.parametrize(
     "params",
     [
@@ -20,13 +19,14 @@ from sglang.srt.layers.moe.topk import biased_grouped_topk
     ],
 )
 @pytest.mark.parametrize("num_fused_shared_experts", [0, 1, 2])
-def test_moe_fused_gate_combined(seq_length, dtype, params, num_fused_shared_experts):
+def test_moe_fused_gate_combined(seq_length, params, num_fused_shared_experts):
     num_experts, num_expert_group, topk_group, topk = params
+    dtype = torch.float32
     torch.manual_seed(seq_length)
-    tensor = torch.rand((seq_length, num_experts)).to(dtype).cuda()
+    tensor = torch.rand((seq_length, num_experts), dtype=dtype, device="cuda")
     scores = tensor.clone()
-    bias = torch.rand(num_experts).to(dtype).cuda()
+    bias = torch.rand(num_experts, dtype=dtype, device="cuda")
     topk = topk + num_fused_shared_experts
     output, indices = moe_fused_gate(