Remove assertions about per-group quant fp8 (#8717)

This commit is contained in:
fzyzcjy
2025-08-03 08:08:40 +08:00
committed by GitHub
parent 0a56b721d5
commit 403566bcca

View File

@@ -354,10 +354,6 @@ def sglang_per_token_group_quant_fp8(
), "the last dimension of `x` cannot be divisible by `group_size`"
assert x.is_contiguous(), "`x` is not contiguous"
if scale_ue8m0:
# TODO: handle this case by fixing the (token=4, dim=256, group_size=128) UT case
assert x.shape[-1] % (group_size * 4) == 0
x_q = torch.empty_like(x, device=x.device, dtype=fp8_dtype)
x_s = create_per_token_group_quant_fp8_output_scale(
x_shape=x.shape,