Support --enable-llama4-multimodal (#5254)
This commit is contained in:
@@ -43,10 +43,12 @@ class ModelConfig:
|
||||
context_length: Optional[int] = None,
|
||||
model_override_args: Optional[str] = None,
|
||||
is_embedding: Optional[bool] = None,
|
||||
enable_multimodal: Optional[bool] = None,
|
||||
dtype: str = "auto",
|
||||
quantization: Optional[str] = None,
|
||||
override_config_file: Optional[str] = None,
|
||||
) -> None:
|
||||
|
||||
self.model_path = model_path
|
||||
self.revision = revision
|
||||
self.quantization = quantization
|
||||
@@ -70,14 +72,28 @@ class ModelConfig:
|
||||
self.hf_text_config, "attention_chunk_size", None
|
||||
)
|
||||
|
||||
# Default multimodal support from the architecture when the caller did not
# pass --enable-llama4-multimodal explicitly: Llama4's multimodal path is
# opt-in, every other architecture keeps multimodal enabled.
if enable_multimodal is None:
    # NOTE: hf_config.architectures is a list of architecture class names
    # (HF transformers convention), so an equality test against a single
    # string can never match — use membership instead.
    if "Llama4ForConditionalGeneration" in self.hf_config.architectures:
        enable_multimodal = False
    else:
        enable_multimodal = True
|
||||
|
||||
# Check model type.
# The multimodal capability flags are additionally gated on
# enable_multimodal so that the flag (or its Llama4 default, above) can
# force them off regardless of what the architecture advertises.
# The previous ungated assignments were dead stores — each value was
# immediately overwritten by its gated counterpart — so they are removed.
self.is_generation = is_generation_model(
    self.hf_config.architectures, is_embedding
)
self.is_multimodal = enable_multimodal and is_multimodal_model(
    self.hf_config.architectures
)
self.is_multimodal_gen = enable_multimodal and is_multimodal_gen_model(
    self.hf_config.architectures
)
self.is_image_gen = enable_multimodal and is_image_gen_model(
    self.hf_config.architectures
)
self.is_audio_model = enable_multimodal and is_audio_model(
    self.hf_config.architectures
)
self.is_encoder_decoder = is_encoder_decoder_model(self.hf_config.architectures)
# Resolve the runtime dtype from the text sub-config and the user request
# (e.g. "auto"); helper is defined elsewhere in this file.
self.dtype = _get_and_verify_dtype(self.hf_text_config, dtype)
|
||||
|
||||
|
||||
Reference in New Issue
Block a user