From 78814aaa688f8c49ed0592bd0c3e58bdd1208742 Mon Sep 17 00:00:00 2001
From: Chranos <826995883@qq.com>
Date: Wed, 11 Feb 2026 15:09:59 +0800
Subject: [PATCH] add deepseekv3 and llama4

---
 vllm-v0.6.2/vllm/model_executor/model_loader/utils.py | 11 ++++-------
 1 file changed, 4 insertions(+), 7 deletions(-)

diff --git a/vllm-v0.6.2/vllm/model_executor/model_loader/utils.py b/vllm-v0.6.2/vllm/model_executor/model_loader/utils.py
index 73de3f0..05f6f61 100644
--- a/vllm-v0.6.2/vllm/model_executor/model_loader/utils.py
+++ b/vllm-v0.6.2/vllm/model_executor/model_loader/utils.py
@@ -21,13 +21,10 @@ def set_default_torch_dtype(dtype: torch.dtype):
 def get_model_architecture(
         model_config: ModelConfig) -> Tuple[Type[nn.Module], str]:
     architectures = getattr(model_config.hf_config, "architectures", None) or []
-    logger.warning("[DEBUG-ARCH] get_model_architecture: "
-                   "type(hf_config)=%s, architectures=%s, "
-                   "id(hf_config)=%s, has_text_config=%s",
-                   type(model_config.hf_config).__name__,
-                   getattr(model_config.hf_config, "architectures", "MISSING"),
-                   id(model_config.hf_config),
-                   hasattr(model_config.hf_config, "text_config"))
+    print(f"[DEBUG-ARCH] get_model_architecture: "
+          f"type(hf_config)={type(model_config.hf_config).__name__}, "
+          f"architectures={getattr(model_config.hf_config, 'architectures', 'MISSING')}, "
+          f"id(hf_config)={id(model_config.hf_config)}")
     # Special handling for quantized Mixtral.
     # FIXME(woosuk): This is a temporary hack.
     mixtral_supported = [