From 1effba4c7030615136a34608ce9f8ddbebd0934a Mon Sep 17 00:00:00 2001
From: Michael Feil <63565275+michaelfeil@users.noreply.github.com>
Date: Thu, 17 Apr 2025 21:23:22 -0700
Subject: [PATCH] Configuration qwen2_moe.py - qkv_bias now in transformers
 (#5512)

---
 python/sglang/srt/models/qwen2_moe.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/python/sglang/srt/models/qwen2_moe.py b/python/sglang/srt/models/qwen2_moe.py
index 4cbb0df4a..52dd697ef 100644
--- a/python/sglang/srt/models/qwen2_moe.py
+++ b/python/sglang/srt/models/qwen2_moe.py
@@ -262,8 +262,7 @@ class Qwen2MoeDecoderLayer(nn.Module):
         rope_theta = getattr(config, "rope_theta", 10000)
         rope_scaling = getattr(config, "rope_scaling", None)
         max_position_embeddings = getattr(config, "max_position_embeddings", 8192)
-        # note: replace config.num_hidden_layers < 80 with True once its available in transformers 4.50.0
-        qkv_bias = getattr(config, "qkv_bias", config.num_hidden_layers < 80)
+        qkv_bias = getattr(config, "qkv_bias", True)
         self.self_attn = Qwen2MoeAttention(
             hidden_size=self.hidden_size,
             num_heads=config.num_attention_heads,
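
For context, a minimal sketch (not part of the patch) of how the getattr fallback behaves after this change: a config that defines qkv_bias keeps its own value, while a config without the attribute now defaults to True instead of the previous num_hidden_layers < 80 heuristic. The DummyConfig class below is hypothetical, used only to illustrate the fallback; it is not a real transformers class.

    # Hypothetical stand-in for a Qwen2-MoE config object, for illustration only.
    class DummyConfig:
        def __init__(self, **kwargs):
            for key, value in kwargs.items():
                setattr(self, key, value)

    # Config that explicitly sets qkv_bias: its value wins over the default.
    cfg_with_attr = DummyConfig(qkv_bias=False, num_hidden_layers=94)
    assert getattr(cfg_with_attr, "qkv_bias", True) is False

    # Config without the attribute: falls back to the new default True
    # (previously the fallback was `config.num_hidden_layers < 80`).
    cfg_without_attr = DummyConfig(num_hidden_layers=94)
    assert getattr(cfg_without_attr, "qkv_bias", True) is True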