From 07bc24a5551badcf14972d60bf4f6667f0a03b1f Mon Sep 17 00:00:00 2001
From: Xinyu Dong
Date: Mon, 5 Jan 2026 10:51:23 +0800
Subject: [PATCH] [Bugs] Fix moe when without bias (#76)

---
 vllm_kunlun/ops/fused_moe/layer.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/vllm_kunlun/ops/fused_moe/layer.py b/vllm_kunlun/ops/fused_moe/layer.py
index 84cbd36..772fe1a 100644
--- a/vllm_kunlun/ops/fused_moe/layer.py
+++ b/vllm_kunlun/ops/fused_moe/layer.py
@@ -196,7 +196,9 @@ class FusedMoE(VllmFusedMoE):
         self.moe_config = moe
         self.quant_config = quant_config
         self.has_bias=has_bias
-
+        self.register_parameter("w13_bias", None)
+        self.register_parameter("w2_bias", None)
+
         # Note: get_quant_method will look at the layer's local_num_experts
         # for heuristic purposes, so it must be initialized first.
         quant_method: Optional[QuantizeMethodBase] = None
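
A minimal standalone PyTorch sketch of the failure mode this patch addresses: downstream code reads layer.w13_bias / layer.w2_bias, which raises AttributeError on a bias-free model unless those names are registered as None placeholders. The FusedMoESketch class, its constructor arguments, and the tensor shapes below are invented for illustration and are not the project's actual code path.

    import torch
    from torch import nn

    class FusedMoESketch(nn.Module):
        """Illustrative stand-in for FusedMoE; names and shapes are assumptions."""

        def __init__(self, has_bias: bool, num_experts: int = 4, hidden: int = 8):
            super().__init__()
            self.has_bias = has_bias
            # The patch's fix: always register the bias slots as None placeholders
            # so attribute access succeeds even when the model has no expert bias.
            self.register_parameter("w13_bias", None)
            self.register_parameter("w2_bias", None)
            if has_bias:
                # In the real layer, a quant method's create_weights() would later
                # install real parameters; re-registering a name replaces the None.
                self.register_parameter(
                    "w13_bias", nn.Parameter(torch.zeros(num_experts, 2 * hidden)))
                self.register_parameter(
                    "w2_bias", nn.Parameter(torch.zeros(num_experts, hidden)))

    layer = FusedMoESketch(has_bias=False)
    # Without the register_parameter(..., None) calls this attribute access would
    # raise AttributeError; with them it cleanly yields None.
    assert layer.w13_bias is None and layer.w2_bias is None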