[Feature] Support glmx (#194)

Signed-off-by: Li Wei <liwei.109@outlook.com>
Co-authored-by: tangshiwen <tangshiwen@baidu.com>
Co-authored-by: Xinyu Dong <dongxinyu03@baidu.com>
This commit is contained in:
Li Wei
2026-02-12 15:40:42 +08:00
committed by GitHub
parent 070bfa4a73
commit 744719587e
9 changed files with 159 additions and 7 deletions

View File

@@ -47,6 +47,16 @@ def register():
"""Register the Kunlun platform"""
from .utils import redirect_output
from .vllm_utils_wrapper import direct_register_custom_op, patch_annotations_for_schema
# Change for GLM5
if "vllm.transformers_utils.config" in sys.modules:
from .transformer_utils.config import _XPU_CONFIG_REGISTRY
sys.modules["vllm.transformers_utils.config"]._CONFIG_REGISTRY = _XPU_CONFIG_REGISTRY
import vllm.config.model as model_module
from .config.model import is_deepseek_mla
model_module.ModelConfig.is_deepseek_mla = property(is_deepseek_mla)
import_hook()
return "vllm_kunlun.platforms.kunlun.KunlunPlatform"

View File

View File

@@ -0,0 +1,22 @@
def is_deepseek_mla(self) -> bool:
    """Return True when the model uses DeepSeek-style Multi-head Latent
    Attention (MLA).

    MLA is signalled by a non-None ``kv_lora_rank`` on the HF text config
    for the known MLA model families; EAGLE draft modules are unwrapped
    and checked against their underlying base architecture.
    """
    cfg = self.hf_text_config
    model_type = getattr(cfg, "model_type", None)
    if model_type is None:
        return False

    mla_families = (
        "deepseek_v2",
        "deepseek_v3",
        "deepseek_v32",
        "deepseek_mtp",
        "kimi_k2",
        "longcat_flash",
        "glm_moe_dsa",
    )
    if model_type in mla_families:
        return cfg.kv_lora_rank is not None

    if model_type == "eagle":
        # EAGLE modules wrap a base model; MLA applies only when the
        # wrapped architecture is a DeepSeek variant with a KV LoRA rank.
        base_type = cfg.model.model_type
        return (base_type in ("deepseek_v2", "deepseek_v3", "deepseek_v32")
                and cfg.kv_lora_rank is not None)

    return False

View File

@@ -89,5 +89,9 @@ def register_model():
"DeepSeekMTPModel",
"vllm_kunlun.models.deepseek_mtp:DeepSeekMTP")
ModelRegistry.register_model(
"GlmMoeDsaForCausalLM",
"vllm_kunlun.models.deepseek_v2:GlmMoeDsaForCausalLM")
def register_quant_method():
    """Register Kunlun-specific quantization methods.

    TODO: not yet implemented — currently a no-op placeholder kept so the
    platform registration hooks have a stable entry point to call.
    """

View File

@@ -1339,6 +1339,10 @@ class DeepseekV3ForCausalLM(DeepseekV2ForCausalLM):
pass
class GlmMoeDsaForCausalLM(DeepseekV2ForCausalLM):
    # GLM MoE DSA causal-LM entry point. Reuses the DeepSeek-V2
    # implementation unchanged (empty subclass); it exists so the model
    # registry can map the "GlmMoeDsaForCausalLM" architecture name to
    # this class.
    pass
# Compatibility with
# https://huggingface.co/deepseek-ai/DeepSeek-V3-Base/blob/main/configuration_deepseek.py
def get_spec_layer_idx_from_weight_name(config: Union[DeepseekV2Config,

View File

@@ -195,10 +195,6 @@ class KunlunOps:
query_x = query.contiguous()
key_x = key.contiguous()
num_tokens = query_x.shape[0]
num_heads = query_x.shape[1] // head_size
num_kv_heads = key_x.shape[1] // head_size
torch.ops._C.rotary_embedding(
positions,
query_x,
@@ -207,9 +203,6 @@ class KunlunOps:
cos_sin_cache,
is_neox_style)
query_x = query_x.view(num_tokens, num_heads * head_size)
key_x = key_x.view(num_tokens, num_kv_heads * head_size)
return query_x, key_x
# Rotary embedding

View File

@@ -0,0 +1,27 @@
from transformers import PretrainedConfig
# NOTE(review): _CONFIG_REGISTRY is imported but not referenced in the
# visible lines — confirm whether it is used elsewhere in this module
# before removing.
from vllm.transformers_utils.config import LazyConfigDict, _CONFIG_REGISTRY
# XPU-side replacement for vLLM's model-type -> config-class registry.
# It is installed over ``vllm.transformers_utils.config._CONFIG_REGISTRY``
# at platform-registration time; entries map an HF ``model_type`` string
# to a lazily-imported PretrainedConfig subclass name.
_XPU_CONFIG_REGISTRY: dict[str, type[PretrainedConfig]] = LazyConfigDict(
    chatglm="ChatGLMConfig",
    deepseek_vl_v2="DeepseekVLV2Config",
    deepseek_v3="DeepseekV3Config",
    deepseek_v32="DeepseekV3Config",
    # glm_moe_dsa reuses the DeepSeek-V3 config class (presumably the
    # config schemas are compatible — verify against the model repo).
    glm_moe_dsa="DeepseekV3Config",
    kimi_vl="KimiVLConfig",
    Llama_Nemotron_Nano_VL="Nemotron_Nano_VL_Config",
    RefinedWeb="RWConfig",  # For tiiuae/falcon-40b(-instruct)
    RefinedWebModel="RWConfig",  # For tiiuae/falcon-7b(-instruct)
    jais="JAISConfig",
    mlp_speculator="MLPSpeculatorConfig",
    medusa="MedusaConfig",
    midashenglm="MiDashengLMConfig",
    eagle="EAGLEConfig",
    speculators="SpeculatorsConfig",
    nemotron="NemotronConfig",
    olmo3="Olmo3Config",
    ovis="OvisConfig",
    ultravox="UltravoxConfig",
    step3_vl="Step3VLConfig",
    step3_text="Step3TextConfig",
    qwen3_next="Qwen3NextConfig",
)