fix: fix typo in comments in w8a8_fp8.py (#4843)

This commit is contained in:
Jiaqi
2025-03-28 12:06:47 +08:00
committed by GitHub
parent 9fdc6d6abc
commit 72031173e4

View File

@@ -37,7 +37,7 @@ class W8A8Fp8Config(QuantizationConfig):
Note:
- For models without offline quantization, weights will be quantized during model loading
- If CUTLASS is supported: Per-channel weight quantization is used
- If CUTLASS is not supported: Falls back to per-token weight quantization
- If CUTLASS is not supported: Falls back to per-tensor weight quantization
"""
def __init__(self, is_checkpoint_fp8_serialized: bool = False):