Fix incorrect LoRA weight loading for fused gate_up_proj (#6734)
@@ -209,4 +209,12 @@ class LoRAAdapter(nn.Module):
                 gate_up_name = weight_name
                 if "lora_A" in weight_name:
                     weights[gate_up_name] = weights[gate_up_name].repeat(2, 1)
-                # else: "lora_B" is already stacked, no operations is needed.
+                else:
+                    output_dim = weights[gate_up_name].shape[0] // 2
+                    weights[gate_up_name] = torch.stack(
+                        [
+                            weights[gate_up_name][:output_dim, :],
+                            weights[gate_up_name][output_dim:, :],
+                        ],
+                        dim=0,
+                    )