forked from EngineX-Cambricon/enginex-mlu370-vllm
add deepseekv3 and llama4
@@ -21,13 +21,10 @@ def set_default_torch_dtype(dtype: torch.dtype):
def get_model_architecture(
        model_config: ModelConfig) -> Tuple[Type[nn.Module], str]:
    architectures = getattr(model_config.hf_config, "architectures", None) or []
    logger.warning("[DEBUG-ARCH] get_model_architecture: "
                   "type(hf_config)=%s, architectures=%s, "
                   "id(hf_config)=%s, has_text_config=%s",
                   type(model_config.hf_config).__name__,
                   getattr(model_config.hf_config, "architectures", "MISSING"),
                   id(model_config.hf_config),
                   hasattr(model_config.hf_config, "text_config"))
    print(f"[DEBUG-ARCH] get_model_architecture: "
          f"type(hf_config)={type(model_config.hf_config).__name__}, "
          f"architectures={getattr(model_config.hf_config, 'architectures', 'MISSING')}, "
          f"id(hf_config)={id(model_config.hf_config)}")
    # Special handling for quantized Mixtral.
    # FIXME(woosuk): This is a temporary hack.
    mixtral_supported = [
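
For context, here is a minimal standalone sketch (not the vLLM implementation) of the lookup the [DEBUG-ARCH] lines above are inspecting. The fallback to a nested text_config is an assumption about why has_text_config is logged: multimodal configs such as Llama 4's may keep the language-model architectures there. The function name resolve_architectures, the SimpleNamespace configs, and the architecture strings are illustrative only.

    # Hypothetical sketch, assuming the debug lines probe whether the
    # `architectures` list lives on the top-level config or on a nested
    # `text_config` (an assumption, not this fork's confirmed logic).
    from types import SimpleNamespace
    from typing import List


    def resolve_architectures(hf_config) -> List[str]:
        """Return the `architectures` list from an HF-style config object,
        falling back to `hf_config.text_config` when the top-level config
        does not carry one (illustration only)."""
        archs = getattr(hf_config, "architectures", None) or []
        if not archs and hasattr(hf_config, "text_config"):
            archs = getattr(hf_config.text_config, "architectures", None) or []
        return archs


    # Example configs; the architecture strings are placeholders, not a claim
    # about the exact class names registered in this fork.
    plain = SimpleNamespace(architectures=["DeepseekV3ForCausalLM"])
    nested = SimpleNamespace(
        text_config=SimpleNamespace(architectures=["Llama4ForCausalLM"]))

    print(resolve_architectures(plain))   # ['DeepseekV3ForCausalLM']
    print(resolve_architectures(nested))  # ['Llama4ForCausalLM']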