[Quant Kernel] refactored per token group quant fp8 to support int8 up-to 2x faster (#4396)
This commit is contained in:
@@ -31,6 +31,7 @@ from sgl_kernel.gemm import (
     int8_scaled_mm,
     sgl_per_tensor_quant_fp8,
     sgl_per_token_group_quant_fp8,
+    sgl_per_token_group_quant_int8,
     sgl_per_token_quant_fp8,
 )
 from sgl_kernel.moe import moe_align_block_size, topk_softmax
Reference in New Issue
Block a user