delete GlmForCausalLM registration (#132)

Co-authored-by: hanhaowen <hanhaowen@baidu.com>
Author: youzeyu
Committed-by: GitHub
Date: 2026-01-20 19:22:33 +08:00
Parent: 561a235a3f
Commit: 92b40628cd
2 changed files with 0 additions and 28 deletions

@@ -36,10 +36,6 @@ def register_model():
     ModelRegistry.register_model(
         "Qwen3NextForCausalLM",
         "vllm_kunlun.models.qwen3_next:Qwen3NextForCausalLM")
-    ModelRegistry.register_model(
-        "GlmForCausalLM",
-        "vllm_kunlun.models.glm:GlmForCausalLM")
     ModelRegistry.register_model(
         "GptOssForCausalLM",

@@ -1,24 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
-"""Inference-only HF format GLM-4 model compatible with THUDM weights."""
-from vllm.config import VllmConfig
-# from vllm.model_executor.models.llama import LlamaForCausalLM
-from .llama import LlamaForCausalLM  #noqa: F401
-from vllm.model_executor.models.utils import PPMissingLayer
-
-
-class GlmForCausalLM(LlamaForCausalLM):
-
-    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
-        print("glm for causalLM initialization!!!!", flush=True)
-        vllm_config.model_config.hf_config.partial_rotary_factor = 0.5
-        super().__init__(vllm_config=vllm_config, prefix=prefix)
-        # Hack Llama model to fit HF format GLM implementation
-        # Attention difference between GLM and Llama:
-        # 1. Half partial rotary_dim and no Neox style.
-        # 2. There is no bias for o_proj in attention
-        for layer in self.model.layers:
-            if not isinstance(layer, PPMissingLayer):
-                layer.self_attn.rotary_emb.is_neox_style = False
-                layer.self_attn.o_proj.bias = None
-                layer.self_attn.o_proj.skip_bias_add = True
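
The deleted adapter reused LlamaForCausalLM and patched the two attention-level
differences after construction. An illustrative sketch of what the two settings
change; head_dim is an assumed example value, not read from any GLM config:

    # partial_rotary_factor = 0.5: rotary embeddings cover only the first
    # half of each head's channels; the rest pass through unrotated.
    head_dim = 128  # hypothetical example value
    partial_rotary_factor = 0.5
    rotary_dim = int(head_dim * partial_rotary_factor)  # 64 of 128 channels

    # is_neox_style = False selects GPT-J-style rotation over interleaved
    # channel pairs instead of the Neox pairing across split halves.
    gptj_pairs = [(2 * i, 2 * i + 1) for i in range(rotary_dim // 2)]
    neox_pairs = [(i, i + rotary_dim // 2) for i in range(rotary_dim // 2)]

Clearing o_proj.bias and setting skip_bias_add = True makes the attention
output projection bias-free, matching GLM checkpoints, which ship no o_proj
bias.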