[Bugs] Fix MoE when bias is absent (#76)

This commit is contained in:
Xinyu Dong
2026-01-05 10:51:23 +08:00
committed by GitHub
parent b86953acf9
commit 07bc24a555

View File

@@ -196,7 +196,9 @@ class FusedMoE(VllmFusedMoE):
self.moe_config = moe
self.quant_config = quant_config
self.has_bias=has_bias
self.register_parameter("w13_bias", None)
self.register_parameter("w2_bias", None)
# Note: get_quant_method will look at the layer's local_num_experts
# for heuristic purposes, so it must be initialized first.
quant_method: Optional[QuantizeMethodBase] = None