[Bugfix] fix w8a8_int8 load issue (#8308)

Co-authored-by: ronnie_zheng <zl19940307@163.com>
Author: Even Zhou
Date: 2025-08-01 08:30:16 +08:00
Committed by: GitHub
Parent: fe5086fd8b
Commit: 99795d61e6

2 files changed, 6 insertions(+), 1 deletion(-)


@@ -231,7 +231,10 @@ class W8A8Int8Config(QuantizationConfig):
     @classmethod
     def get_config_filenames(cls) -> List[str]:
-        return []
+        filenames = []
+        if _is_npu:
+            filenames.append("quant_model_description.json")
+        return filenames
 
     @classmethod
     def from_config(cls, config: Dict[str, Any]) -> W8A8Int8Config:
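With this change, on Ascend NPU the w8a8_int8 backend advertises quant_model_description.json as its quantization config file, while other platforms still report no config file. A minimal sketch of how a loader could consume such a filename list (the load_quant_description helper and its arguments are illustrative assumptions, not part of the real codebase):

import json
import os
from typing import Any, Dict, List


def load_quant_description(model_dir: str, filenames: List[str]) -> Dict[str, Any]:
    """Return the first quantization description found, or {} if none is declared.

    Illustrative only: mirrors the idea that get_config_filenames() yields
    ["quant_model_description.json"] on NPU and [] elsewhere.
    """
    for name in filenames:
        path = os.path.join(model_dir, name)
        if os.path.isfile(path):
            with open(path) as f:
                return json.load(f)
    # No description file declared or present: fall back to an empty config.
    return {}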


@@ -229,6 +229,8 @@ def get_quant_config(
                     f"Unsupported quantization config"
                     f" found for {model_config.quantization} in {f}."
                 )
+        elif model_config.quantization == "w8a8_int8":
+            config["packed_modules_mapping"] = packed_modules_mapping
 
     return quant_cls.from_config(config)
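The second hunk attaches packed_modules_mapping to the raw config dict before from_config is called, presumably so the w8a8_int8 backend can resolve quantization parameters for fused (packed) layers. A self-contained sketch of that idea (the mapping values and the build_quant_config helper are hypothetical examples, not the actual code path):

from typing import Any, Dict, List

# Hypothetical example of a packed-modules mapping: fused layers on the left,
# the original checkpoint modules they pack on the right.
PACKED_MODULES_MAPPING: Dict[str, List[str]] = {
    "qkv_proj": ["q_proj", "k_proj", "v_proj"],
    "gate_up_proj": ["gate_proj", "up_proj"],
}


def build_quant_config(quantization: str, config: Dict[str, Any]) -> Dict[str, Any]:
    # Mirrors the fix: for w8a8_int8 the mapping is injected into the config
    # dict so packed layers can be traced back to their original modules.
    if quantization == "w8a8_int8":
        config["packed_modules_mapping"] = PACKED_MODULES_MAPPING
    return config


if __name__ == "__main__":
    cfg = build_quant_config("w8a8_int8", {"quant_method": "w8a8_int8"})
    print(cfg["packed_modules_mapping"]["qkv_proj"])  # ['q_proj', 'k_proj', 'v_proj']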