Fix shared experts fusion + weight requant (#7177)

This commit is contained in:
fzyzcjy
2025-06-14 17:35:18 +08:00
committed by GitHub
parent 98538822d5
commit b57d87c297

View File

@@ -1960,7 +1960,8 @@ class DeepseekV2ForCausalLM(nn.Module):
)
if layer_id in moe_layers:
-                    shared_experts = layer.mlp.shared_experts
+                    shared_experts = getattr(layer.mlp, "shared_experts", None)
+                    if shared_experts is not None:
for module in [
shared_experts.gate_up_proj,
shared_experts.down_proj,