Update seed in CPU UTs to avoid flaky failure with single test (#7544)
This commit is contained in:
@@ -8,7 +8,7 @@ from utils import SiluAndMul, precision

 from sglang.test.test_utils import CustomTestCase

-torch.manual_seed(0)
+torch.manual_seed(1234)


 class TestActivation(CustomTestCase):
@@ -6,7 +6,7 @@ from torch.nn.functional import scaled_dot_product_attention

 from sglang.test.test_utils import CustomTestCase

-torch.manual_seed(0)
+torch.manual_seed(1234)


 class TestDecodeAttention(CustomTestCase):
@@ -6,7 +6,7 @@ from torch.nn.functional import scaled_dot_product_attention

 from sglang.test.test_utils import CustomTestCase

-torch.manual_seed(0)
+torch.manual_seed(1234)


 class TestExtendAttention(CustomTestCase):
@@ -14,7 +14,7 @@ from utils import (

 from sglang.test.test_utils import CustomTestCase

-torch.manual_seed(0)
+torch.manual_seed(1234)


 class Mod(nn.Module):
@@ -8,7 +8,7 @@ from utils import precision

 from sglang.test.test_utils import CustomTestCase

-torch.manual_seed(0)
+torch.manual_seed(1234)


 class TestMLA(CustomTestCase):
@@ -8,7 +8,7 @@ import torch

 kernel = torch.ops.sgl_kernel

-torch.manual_seed(0)
+torch.manual_seed(1234)

 from utils import (
     BLOCK_K,
@@ -8,7 +8,7 @@ from utils import make_non_contiguous, precision

 from sglang.test.test_utils import CustomTestCase

-torch.manual_seed(0)
+torch.manual_seed(1234)


 class TestNorm(CustomTestCase):
@@ -15,7 +15,7 @@ from sglang.test.test_utils import CustomTestCase
 convert_weight_packed = torch.ops.sgl_kernel.convert_weight_packed
 qkv_proj_with_rope = torch.ops.sgl_kernel.qkv_proj_with_rope
 qkv_proj_with_rope_fused_weight = torch.ops.sgl_kernel.qkv_proj_with_rope_fused_weight
-torch.manual_seed(0)
+torch.manual_seed(1234)
 # constants
 kv_lora_rank = 512
 qk_head_dim = 192
@@ -10,7 +10,7 @@ from sglang.srt.layers.rotary_embedding import (
 )
 from sglang.test.test_utils import CustomTestCase

-torch.manual_seed(0)
+torch.manual_seed(1234)


 class TestROPE(CustomTestCase):
@@ -22,7 +22,7 @@ from utils import (

 from sglang.test.test_utils import CustomTestCase

-torch.manual_seed(0)
+torch.manual_seed(1234)


 class TestSharedExpert(CustomTestCase):
@@ -13,7 +13,7 @@ from sglang.srt.layers.moe.topk import grouped_topk_gpu as native_grouped_topk
 from sglang.srt.models.llama4 import Llama4MoE
 from sglang.test.test_utils import CustomTestCase

-torch.manual_seed(0)
+torch.manual_seed(1234)


 # This is used by the Deepseek-V2 model
Reference in New Issue
Block a user