Support FP4 gemm (1/2) (#3899)
sgl-kernel/tests/test_fp4_gemm.py  (new file, 151 lines)
@@ -0,0 +1,151 @@
import pytest
import torch
from sgl_kernel import cutlass_scaled_fp4_mm, scaled_fp4_quant

if torch.cuda.get_device_capability() < (10, 0):
    pytest.skip(
        reason="nvfp4 requires compute capability of 10 or above.",
        allow_module_level=True,
    )

DTYPES = [torch.float16, torch.bfloat16]
# m, n, packed_k (two fp4 values per uint8, so k = packed_k * 2)
SHAPES = [(128, 128, 64), (128, 128, 128), (256, 128, 64), (128, 256, 128)]
PAD_SHAPES = [(150, 128, 64), (128, 128, 96)]
SHAPES.extend(PAD_SHAPES)

FLOAT4_E2M1_MAX = 6.0
FLOAT8_E4M3_MAX = torch.finfo(torch.float8_e4m3fn).max
kE2M1ToFloatArray = [
    0.0,
    0.5,
    1.0,
    1.5,
    2.0,
    3.0,
    4.0,
    6.0,
]


def e2m1_to_fp32(int4_value):
    signBit = int4_value & 0x8
    int4_absValue = int4_value & 0x7
    float_result = kE2M1ToFloatArray[int4_absValue]
    if signBit:
        float_result = -float_result
    return float_result
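
# Example of the decode above: the nibble 0xB is 1011b, so the sign bit (0x8)
# is set and the magnitude bits are 0b011 -> kE2M1ToFloatArray[3] = 1.5,
# giving e2m1_to_fp32(0xB) == -1.5; likewise 0x7 decodes to 6.0.
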
def break_fp4_bytes(a, dtype):
    assert a.dtype == torch.uint8
    m, n = a.shape
    a = a.flatten()
    # Get upper 4 bits
    highHalfByte = (a & 0xF0) >> 4
    # Get lower 4 bits
    lowHalfByte = a & 0x0F
    fH = torch.tensor([e2m1_to_fp32(x) for x in highHalfByte]).to(a.device)
    fL = torch.tensor([e2m1_to_fp32(x) for x in lowHalfByte]).to(a.device)
    # [0xAB, 0xCD] -> [0xB, 0xA, 0xD, 0xC]
    out = torch.stack((fL, fH), dim=-1).reshape(m, n * 2)
    return out


def convert_swizzled_to_linear(a_sf_swizzled: torch.Tensor, m, k, block_size):
    sf_m, sf_k = a_sf_swizzled.shape
    m_tiles = (m + 128 - 1) // 128
    f = block_size * 4
    k_tiles = (k + f - 1) // f
    tmp = torch.reshape(a_sf_swizzled, (1, m_tiles, k_tiles, 32, 4, 4))
    tmp = torch.permute(tmp, (0, 1, 4, 3, 2, 5))
    out = tmp.reshape(m_tiles * 128, k_tiles * f // block_size)
    # One scale factor per block_size elements along k.
    return out[0:m, 0 : k // block_size]
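
# Note on the layout handled above: scaled_fp4_quant returns the per-block
# scales in the swizzled layout the GEMM kernel consumes, where each 128x4
# tile of scales is stored as a contiguous (32, 4, 4) chunk and the row and
# column counts are padded to multiples of 128 and 4. convert_swizzled_to_linear
# undoes that interleaving and crops the padding back to (m, k // block_size).
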
def dequantize_to_dtype(
    tensor_fp4, tensor_sf, global_scale, dtype, device, block_size=16
):
    """Dequantize the fp4 tensor back to high precision."""
    # Two fp4 values are packed into one uint8.
    assert tensor_fp4.dtype == torch.uint8
    m, packed_k = tensor_fp4.shape
    k = packed_k * 2
    tensor_f32 = break_fp4_bytes(tensor_fp4, dtype)
    tensor_f32 = tensor_f32.reshape(m, k // block_size, block_size)
    tensor_sf = tensor_sf.view(torch.float8_e4m3fn)
    tensor_sf = convert_swizzled_to_linear(tensor_sf, m, k, block_size)
    tensor_sf_dtype = tensor_sf.to(torch.float32) / global_scale

    # Scale each block of values by its dequantized scale factor.
    out = (tensor_f32 * tensor_sf_dtype.unsqueeze(-1)).reshape(m, k)
    return out


def get_ref_results(
    a_fp4,
    b_fp4,
    a_sf,
    b_sf,
    a_global_scale,
    b_global_scale,
    m,
    n,
    dtype,
    block_size,
    device,
):
    _, m_k = a_fp4.shape
    _, n_k = b_fp4.shape
    assert m_k == n_k
    a_in_dtype = dequantize_to_dtype(
        a_fp4, a_sf, a_global_scale, dtype=dtype, device=device, block_size=block_size
    )
    b_in_dtype = dequantize_to_dtype(
        b_fp4, b_sf, b_global_scale, dtype=dtype, device=device, block_size=block_size
    )
    return torch.matmul(a_in_dtype, b_in_dtype.t())
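
# The reference above divides each operand's e4m3 block scales by its global
# scale before multiplying, while the kernel consumes the block scales as
# stored; that is why the test below passes alpha = 1 / (a_global_scale *
# b_global_scale) to remove the two global factors from the accumulated product.
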
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("shape", SHAPES)
@torch.inference_mode()
def test_nvfp4_gemm(
    dtype: torch.dtype,
    shape: tuple[int, int, int],
) -> None:
    m, n, packed_k = shape
    k = packed_k * 2
    block_size = 16
    a_dtype = torch.randn((m, k), dtype=dtype, device="cuda")
    b_dtype = torch.randn((n, k), dtype=dtype, device="cuda")

    a_global_scale = (
        (FLOAT8_E4M3_MAX * FLOAT4_E2M1_MAX) / torch.amax(a_dtype.flatten(), dim=-1)
    ).to(torch.float32)
    b_global_scale = (
        (FLOAT8_E4M3_MAX * FLOAT4_E2M1_MAX) / torch.amax(b_dtype.flatten(), dim=-1)
    ).to(torch.float32)
    alpha = 1.0 / (a_global_scale * b_global_scale)
    a_fp4, a_scale_interleaved = scaled_fp4_quant(a_dtype, a_global_scale)
    b_fp4, b_scale_interleaved = scaled_fp4_quant(b_dtype, b_global_scale)

    expected_out = get_ref_results(
        a_fp4,
        b_fp4,
        a_scale_interleaved,
        b_scale_interleaved,
        a_global_scale,
        b_global_scale,
        m,
        n,
        dtype,
        block_size,
        "cuda",
    )
    out = cutlass_scaled_fp4_mm(
        a_fp4, b_fp4, a_scale_interleaved, b_scale_interleaved, alpha, dtype
    )

    torch.testing.assert_close(out, expected_out.to(dtype=dtype), atol=1e-1, rtol=1e-1)
sgl-kernel/tests/test_fp4_quantize.py  (new file, 164 lines)
@@ -0,0 +1,164 @@
import pytest
import torch
from sgl_kernel import scaled_fp4_quant

if torch.cuda.get_device_capability() < (10, 0):
    pytest.skip(
        reason="nvfp4 requires compute capability of 10 or above.",
        allow_module_level=True,
    )

DTYPES = [torch.float16, torch.bfloat16]
SHAPES = [(128, 64), (128, 128), (256, 64), (256, 128)]
PAD_SHAPES = [
    (90, 64),
    (150, 64),
    (128, 48),
    (128, 80),
    (150, 80),
    (90, 48),
    (90, 128),
    (150, 128),
    (150, 48),
    (90, 80),
]

FLOAT4_E2M1_MAX = 6.0
FLOAT8_E4M3_MAX = torch.finfo(torch.float8_e4m3fn).max
# E2M1 to float
# 0111 -> 6
# 0110 -> 4
# 0101 -> 3
# 0100 -> 2
# 0011 -> 1.5
# 0010 -> 1
# 0001 -> 0.5
# 0000 -> 0
E2M1_TO_FLOAT32 = [
    0.0,
    0.5,
    1.0,
    1.5,
    2.0,
    3.0,
    4.0,
    6.0,
    0.0,
    -0.5,
    -1.0,
    -1.5,
    -2.0,
    -3.0,
    -4.0,
    -6.0,
]
BLOCK_SIZE = 16
def cast_from_fp4(x, m, n):
    # The fp4 values are packed in uint8 as [v_1st | v_2nd]
    v_2nd = x & 0xF
    v_1st = (x >> 4) & 0xF
    c = torch.stack((v_2nd, v_1st), dim=-1)
    out = torch.tensor([E2M1_TO_FLOAT32[x] for x in c.flatten()])
    out = out.reshape(m, n).to(torch.float32)
    return out
def cast_to_fp4(x):
    # Round each element to the nearest representable E2M1 magnitude,
    # then restore the sign.
    sign = torch.sign(x)
    x = torch.abs(x)
    x[(x >= 0.0) & (x <= 0.25)] = 0.0
    x[(x > 0.25) & (x < 0.75)] = 0.5
    x[(x >= 0.75) & (x <= 1.25)] = 1.0
    x[(x > 1.25) & (x < 1.75)] = 1.5
    x[(x >= 1.75) & (x <= 2.5)] = 2.0
    x[(x > 2.5) & (x < 3.5)] = 3.0
    x[(x >= 3.5) & (x <= 5.0)] = 4.0
    x[x > 5.0] = 6.0
    return x * sign
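
# The thresholds above implement round-to-nearest with ties going to the value
# whose E2M1 encoding has an even mantissa bit: 0.25 maps to 0.0, 0.75 to 1.0,
# 2.5 to 2.0, and 5.0 to 4.0. The tests compare this reference against the
# kernel output directly, so the two rounding rules have to agree.
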
def get_reciprocal(x):
    if isinstance(x, torch.Tensor):
        return torch.where(x == 0, torch.tensor(0.0, dtype=x.dtype), 1.0 / x)
    elif isinstance(x, (float, int)):
        return 0.0 if x == 0 else 1.0 / x
    else:
        raise TypeError("Input must be a float, int, or a torch.Tensor.")
def ref_nvfp4_quant(x, global_scale):
    assert global_scale.dtype == torch.float32
    assert x.ndim == 2
    m, n = x.shape
    x = torch.reshape(x, (m, n // BLOCK_SIZE, BLOCK_SIZE))
    vec_max = torch.max(torch.abs(x), dim=-1, keepdim=True)[0].to(torch.float32)
    scale = global_scale * (vec_max * get_reciprocal(FLOAT4_E2M1_MAX))
    scale = scale.to(torch.float8_e4m3fn).to(torch.float32)
    output_scale = get_reciprocal(scale * get_reciprocal(global_scale))

    scaled_x = x.to(torch.float32) * output_scale
    clipped_x = torch.clamp(scaled_x, -6.0, 6.0).reshape(m, n)
    return cast_to_fp4(clipped_x), scale.squeeze(-1)
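
# Worked example of the scaling above: with tensor_amax = 3.0 the global scale
# is 448 * 6 / 3 = 896. A block whose amax is 3.0 then gets
# scale = 896 * 3 / 6 = 448 (exactly representable in e4m3), its elements are
# multiplied by 1 / (448 / 896) = 2, and the block maximum 3.0 lands on the
# fp4 maximum 6.0 before rounding.
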
def recover_swizzled_scales(scale, m, n):
    rounded_m = ((m + 128 - 1) // 128) * 128
    scale_n = n // BLOCK_SIZE
    rounded_n = ((scale_n + 4 - 1) // 4) * 4
    # Recover the swizzled scaling factor to linear layout
    tmp = torch.reshape(scale, (1, rounded_m // 128, rounded_n // 4, 32, 4, 4))
    tmp = torch.permute(tmp, (0, 1, 4, 3, 2, 5))
    result = torch.reshape(tmp, (rounded_m, rounded_n)).to(torch.float32)
    return result[:m, :scale_n]
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("shape", SHAPES)
@torch.inference_mode()
def test_quantize_to_fp4(
    dtype: torch.dtype,
    shape: tuple[int, int],
) -> None:
    torch.manual_seed(42)
    torch.set_default_device("cuda:0")

    m, n = shape

    x = torch.randn((m, n), dtype=dtype)
    tensor_amax = torch.abs(x).max().to(torch.float32)
    global_scale = FLOAT8_E4M3_MAX * FLOAT4_E2M1_MAX / tensor_amax
    out_ref, scale_ref = ref_nvfp4_quant(x, global_scale)

    out, out_scale = scaled_fp4_quant(x, global_scale)
    scale_ans = recover_swizzled_scales(out_scale, m, n)
    out_ans = cast_from_fp4(out, m, n)

    torch.testing.assert_close(out_ans, out_ref)
    torch.testing.assert_close(scale_ans, scale_ref)
@pytest.mark.parametrize("pad_shape", PAD_SHAPES)
@torch.inference_mode()
def test_quantize_to_fp4_padded(pad_shape: tuple[int, int]) -> None:
    torch.manual_seed(42)
    dtype = torch.float16
    torch.set_default_device("cuda:0")

    m, n = pad_shape

    x = torch.randn((m, n), dtype=dtype)

    tensor_amax = torch.abs(x).max().to(torch.float32)
    global_scale = FLOAT8_E4M3_MAX * FLOAT4_E2M1_MAX / tensor_amax
    out_ref, scale_ref = ref_nvfp4_quant(x, global_scale)

    out, out_scale = scaled_fp4_quant(x, global_scale)

    scale_ans = recover_swizzled_scales(out_scale, m, n)
    out_ans = cast_from_fp4(out, m, n)

    torch.testing.assert_close(out_ans, out_ref)
    torch.testing.assert_close(scale_ans, scale_ref)