[Quant Kernel] refactored per token group quant fp8 to support int8, up to 2x faster (#4396)

This commit is contained in:
Chunan Zeng
2025-03-23 23:44:17 -07:00
committed by GitHub
parent 3980ff1be6
commit 65c24c28f9
8 changed files with 191 additions and 127 deletions

View File

@@ -31,6 +31,7 @@ from sgl_kernel.gemm import (
int8_scaled_mm,
sgl_per_tensor_quant_fp8,
sgl_per_token_group_quant_fp8,
sgl_per_token_group_quant_int8,
sgl_per_token_quant_fp8,
)
from sgl_kernel.moe import moe_align_block_size, topk_softmax

View File

@@ -96,6 +96,20 @@ def sgl_per_token_group_quant_fp8(
)
def sgl_per_token_group_quant_int8(
    input: torch.Tensor,
    output_q: torch.Tensor,
    output_s: torch.Tensor,
    group_size: int,
    eps: float,
    int8_min: float,
    int8_max: float,
) -> None:
    """Dispatch per-token-group int8 quantization to the ``sgl_kernel`` custom op.

    Thin wrapper: all computation happens inside the registered
    ``torch.ops.sgl_kernel.sgl_per_token_group_quant_int8`` kernel, which
    (per its name and parameters) quantizes ``input`` in groups of
    ``group_size`` elements, writing quantized values into ``output_q``
    and per-group scales into ``output_s``.

    Args:
        input: Source tensor to quantize.
        output_q: Pre-allocated destination for quantized int8 values.
        output_s: Pre-allocated destination for per-group scales.
        group_size: Number of elements per quantization group.
        eps: Small constant, presumably guarding against division by a
            zero scale — confirm against the kernel implementation.
        int8_min: Lower clamp bound passed to the kernel.
        int8_max: Upper clamp bound passed to the kernel.
    """
    op = torch.ops.sgl_kernel.sgl_per_token_group_quant_int8
    op(
        input,
        output_q,
        output_s,
        group_size,
        eps,
        int8_min,
        int8_max,
    )
def sgl_per_tensor_quant_fp8(
input: torch.Tensor,
output_q: torch.Tensor,