Add DeepSeek-V3 and Llama 4 support

This commit is contained in:
Chranos
2026-02-11 15:07:52 +08:00
parent 5132af6176
commit 44ffd2094a

View File

@@ -21,6 +21,13 @@ def set_default_torch_dtype(dtype: torch.dtype):
def get_model_architecture(
model_config: ModelConfig) -> Tuple[Type[nn.Module], str]:
architectures = getattr(model_config.hf_config, "architectures", None) or []
logger.warning("[DEBUG-ARCH] get_model_architecture: "
"type(hf_config)=%s, architectures=%s, "
"id(hf_config)=%s, has_text_config=%s",
type(model_config.hf_config).__name__,
getattr(model_config.hf_config, "architectures", "MISSING"),
id(model_config.hf_config),
hasattr(model_config.hf_config, "text_config"))
# Special handling for quantized Mixtral.
# FIXME(woosuk): This is a temporary hack.
mixtral_supported = [