[quantization] fix channelwise conversion with scalar weight scale (#4596)
@@ -74,6 +74,11 @@ def convert_to_channelwise(
         (sum(logical_widths), 1), dtype=torch.float32, device=weight_scale.device
     )
 
+    # Handle scalar tensor case: broadcast same scale to all channels
+    if weight_scale.dim() == 0:
+        weight_scale_channel.fill_(weight_scale.item())
+        return weight_scale_channel
+
     # Expand each scale to match the size of each logical matrix.
     start = 0
     for idx, logical_width in enumerate(logical_widths):
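For context, here is a minimal, self-contained sketch of the patched logic. The standalone function name and the loop body are assumptions reconstructed from the hunk above (the diff truncates at the `for` line), not the exact upstream code; the point is that a 0-dim `weight_scale` previously fell through to the per-matrix loop, where indexing a scalar tensor raises an `IndexError`.

```python
import torch

# Hypothetical standalone sketch of the patched helper; the real
# convert_to_channelwise lives in the file this hunk modifies.
def convert_to_channelwise_sketch(
    weight_scale: torch.Tensor, logical_widths: list[int]
) -> torch.Tensor:
    # One scale row per output channel across all fused logical matrices.
    weight_scale_channel = torch.empty(
        (sum(logical_widths), 1), dtype=torch.float32, device=weight_scale.device
    )

    # The fix: a scalar (0-dim) scale is broadcast to every channel
    # instead of being indexed per logical matrix below.
    if weight_scale.dim() == 0:
        weight_scale_channel.fill_(weight_scale.item())
        return weight_scale_channel

    # Expand each per-matrix scale to its slice of channels
    # (assumed loop body; the hunk cuts off here).
    start = 0
    for idx, logical_width in enumerate(logical_widths):
        end = start + logical_width
        weight_scale_channel[start:end].fill_(weight_scale[idx].item())
        start = end
    return weight_scale_channel

# A 0-dim scale used to hit the loop and fail; with the fix it broadcasts:
out = convert_to_channelwise_sketch(torch.tensor(0.5), [4, 8])
assert out.shape == (12, 1) and torch.all(out == 0.5)
```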
@@ -33,6 +33,15 @@ DEFAULT_FP8_MODEL_NAME_FOR_ACCURACY_TEST = "neuralmagic/Meta-Llama-3-8B-Instruct
 DEFAULT_FP8_MODEL_NAME_FOR_DYNAMIC_QUANT_ACCURACY_TEST = (
     "neuralmagic/Meta-Llama-3.1-8B-Instruct-FP8-dynamic"
 )
+DEFAULT_FP8_MODEL_NAME_FOR_MODELOPT_QUANT_ACCURACY_TEST = (
+    "nvidia/Llama-3.1-8B-Instruct-FP8"
+)
+# TODO(yundai424): right now specifying to an older revision since the latest one
+# carries kv cache quantization which doesn't work yet
+DEFAULT_FP8_MODEL_NAME_FOR_MODELOPT_QUANT_ACCURACY_TEST_REVISION = (
+    "13858565416dbdc0b4e7a4a677fadfbd5b9e5bb9"
+)
+
 DEFAULT_MODEL_NAME_FOR_TEST = "meta-llama/Llama-3.1-8B-Instruct"
 DEFAULT_SMALL_MODEL_NAME_FOR_TEST = "meta-llama/Llama-3.2-1B-Instruct"
 DEFAULT_MOE_MODEL_NAME_FOR_TEST = "mistralai/Mixtral-8x7B-Instruct-v0.1"
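The pinned revision added above would typically be threaded through to whatever loads the checkpoint; the test harness itself is not part of this diff, so the loader call below is an illustrative assumption only. Hugging Face `from_pretrained` loaders accept a `revision` argument that resolves the repo to a specific commit, which is what makes a pin like this effective:

```python
from transformers import AutoConfig

# Illustrative only: pin the ModelOpt FP8 checkpoint to the commit named
# in the diff so later repo updates (e.g. the added kv cache quantization
# mentioned in the TODO) cannot change test behavior.
MODEL = "nvidia/Llama-3.1-8B-Instruct-FP8"
REVISION = "13858565416dbdc0b4e7a4a677fadfbd5b9e5bb9"

config = AutoConfig.from_pretrained(MODEL, revision=REVISION)
```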