Add DeepSeek-V3 and Llama 4

This commit is contained in:
Chranos
2026-02-11 15:13:14 +08:00
parent 78814aaa68
commit db876765ed
3 changed files with 3 additions and 9 deletions

View File

@@ -21,10 +21,6 @@ def set_default_torch_dtype(dtype: torch.dtype):
def get_model_architecture(
model_config: ModelConfig) -> Tuple[Type[nn.Module], str]:
architectures = getattr(model_config.hf_config, "architectures", None) or []
print(f"[DEBUG-ARCH] get_model_architecture: "
f"type(hf_config)={type(model_config.hf_config).__name__}, "
f"architectures={getattr(model_config.hf_config, 'architectures', 'MISSING')}, "
f"id(hf_config)={id(model_config.hf_config)}")
# Special handling for quantized Mixtral.
# FIXME(woosuk): This is a temporary hack.
mixtral_supported = [