Fix lm_head.weight loading error for quantized Qwen models (#2910)

This commit is contained in:
Rin Intachuen
2025-01-16 21:51:43 +07:00
committed by GitHub
parent 8f2c522aba
commit a2f602b541

View File

@@ -356,6 +356,8 @@ class Qwen2ForCausalLM(nn.Module):
break
else:
# Skip loading extra bias for GPTQ models.
if "lm_head.weight" in name:
continue
if name.endswith(".bias") and name not in params_dict:
continue
param = params_dict[name]