diff --git a/sgl-kernel/python/sgl_kernel/attention.py b/sgl-kernel/python/sgl_kernel/attention.py
index d80a6fbbd..52b2d62af 100644
--- a/sgl-kernel/python/sgl_kernel/attention.py
+++ b/sgl-kernel/python/sgl_kernel/attention.py
@@ -74,9 +74,14 @@ def cutlass_mla_decode(
         f"but got D_q = {D_q}, D_ckv = {D_ckv}, D_latent = {D_latent}, D_rope = {D_rope}"
     )
     assert H == 128, f"H must be 128, but got {H}"
-    # TODO: There is currently an illegal memory access issue with page size !=
-    # 128. Change this when it is fixed.
-    assert PAGE_SIZE == 128, f"PAGE_SIZE must be 128, but got {PAGE_SIZE}"
+
+    assert len(page_table.shape) == 2, f"page_table must be 2D, but got shape {page_table.shape}"
+    B_block_table, block_num = page_table.shape
+    assert B_block_table == B_q, f"page_table must have B_q = {B_q} rows, but got {B_block_table}"
+    # Small pages are packed into full 128-wide tiles, so PAGE_SIZE must divide
+    # 128 and block_num must be a whole number of (128 // PAGE_SIZE) packs.
+    # NOTE: floor division (//) is required here — with true division (/) the
+    # modulus becomes a float and a non-divisor PAGE_SIZE passes silently.
+    assert 128 % PAGE_SIZE == 0, f"PAGE_SIZE must divide 128, but got {PAGE_SIZE}"
+    assert block_num % (128 // PAGE_SIZE) == 0, f"block_num must be a multiple of {128 // PAGE_SIZE}, but got {block_num}"
 
     # TODO(kaixih@nvidia): support fp8
     assert q_nope_and_q_pe.dtype in (
diff --git a/sgl-kernel/tests/test_cutlass_mla.py b/sgl-kernel/tests/test_cutlass_mla.py
index 26a59ad7c..a8aab8d15 100644
--- a/sgl-kernel/tests/test_cutlass_mla.py
+++ b/sgl-kernel/tests/test_cutlass_mla.py
@@ -39,7 +39,7 @@ def ref_mla(
 @pytest.mark.parametrize("mean_seq_len", [128, 1024, 4096])
 @pytest.mark.parametrize("bs", [1, 2, 4])
 @pytest.mark.parametrize("varlen", [False, True])
-@pytest.mark.parametrize("block_size", [128])
+@pytest.mark.parametrize("block_size", [1, 16, 64, 128])
 def test_cutlass_mla_decode(
     dtype: torch.dtype, mean_seq_len: int, bs: int, varlen: bool, block_size: int
 ):
@@ -62,6 +62,11 @@ def test_cutlass_mla_decode(
     max_seq_len = seq_lens.max().item()
     block_num = (max_seq_len + block_size - 1) // block_size
 
+    # Pad block_num so that small blocks can be packed into full 128-sized CUTLASS tiles.
+    # One 128-wide tile can hold (128 // block_size) small blocks.
+    pack_factor = 128 // block_size
+    block_num = ((block_num + pack_factor - 1) // pack_factor) * pack_factor
+
     q = torch.randn(bs, h_q, d)
     block_table = torch.randint(0, bs * block_num, (bs, block_num), dtype=torch.int32)