[FP8 KV Cache, Mixtral] Avoid KeyError at loading pre-quantized FP8 model (#1835)

This commit is contained in:
HAI
2024-10-29 13:58:03 -07:00
committed by GitHub
parent d04899d7ca
commit 54dd3ea122

View File

@@ -369,6 +369,9 @@ class MixtralForCausalLM(nn.Module):
# Skip loading extra bias for GPTQ models.
if name.endswith(".bias") and name not in params_dict:
continue
# Skip loading kv_scale from ckpts towards new design.
if name.endswith(".kv_scale") and name not in params_dict:
continue
if name is None:
continue