From 45e1fa8bb3b93fd2ea7e6a0b58b356e20aead951 Mon Sep 17 00:00:00 2001
From: Chranos <826995883@qq.com>
Date: Wed, 11 Feb 2026 15:07:52 +0800
Subject: [PATCH] add debug logging of hf_config in get_model_architecture

NOTE(review): the original subject said "add deepseekv3 and llama4", but this
patch only adds a [DEBUG-ARCH] warning log; no DeepSeek-V3 or Llama-4 model
code is added. Also verify that `logger` is defined in
model_loader/utils.py (e.g. `logger = init_logger(__name__)`); the hunk
context does not show it, so the added call may raise NameError.
---
 vllm-v0.6.2/vllm/model_executor/model_loader/utils.py | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/vllm-v0.6.2/vllm/model_executor/model_loader/utils.py b/vllm-v0.6.2/vllm/model_executor/model_loader/utils.py
index 6a41f9d..73de3f0 100644
--- a/vllm-v0.6.2/vllm/model_executor/model_loader/utils.py
+++ b/vllm-v0.6.2/vllm/model_executor/model_loader/utils.py
@@ -21,6 +21,13 @@ def set_default_torch_dtype(dtype: torch.dtype):
 def get_model_architecture(
         model_config: ModelConfig) -> Tuple[Type[nn.Module], str]:
     architectures = getattr(model_config.hf_config, "architectures", None) or []
+    logger.warning("[DEBUG-ARCH] get_model_architecture: "
+                   "type(hf_config)=%s, architectures=%s, "
+                   "id(hf_config)=%s, has_text_config=%s",
+                   type(model_config.hf_config).__name__,
+                   getattr(model_config.hf_config, "architectures", "MISSING"),
+                   id(model_config.hf_config),
+                   hasattr(model_config.hf_config, "text_config"))
     # Special handling for quantized Mixtral.
     # FIXME(woosuk): This is a temporary hack.
     mixtral_supported = [