fix: remove dependency on latest transformers impl (#3635)
This commit is contained in:
@@ -48,13 +48,16 @@ from transformers.image_utils import (
|
||||
validate_preprocess_arguments,
|
||||
)
|
||||
from transformers.modeling_rope_utils import rope_config_validation
|
||||
from transformers.models.mllama.image_processing_mllama import is_valid_list_of_images
|
||||
from transformers.models.qwen2_vl.image_processing_qwen2_vl import smart_resize
|
||||
from transformers.processing_utils import ProcessingKwargs, Unpack, VideosKwargs
|
||||
from transformers.tokenization_utils_base import PreTokenizedInput, TextInput
|
||||
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
|
||||
|
||||
|
||||
def is_valid_list_of_images(images: List):
    """Local replacement for the transformers helper of the same name.

    Returns a truthy value only when *images* is a non-empty sequence whose
    entries all satisfy ``is_valid_image``. Mirrors the original
    ``images and all(...)`` expression: a falsy input (``[]``, ``None``) is
    returned unchanged rather than coerced to ``False``.
    """
    if not images:
        # Short-circuit exactly like `images and ...`: hand back the falsy input itself.
        return images
    return all(is_valid_image(item) for item in images)
|
||||
|
||||
|
||||
class Qwen2_5_VLVisionConfig(PretrainedConfig):
|
||||
model_type = "qwen2_5_vl"
|
||||
base_config_key = "vision_config"
|
||||
@@ -999,5 +1002,5 @@ class Qwen2_5_VLImageProcessor(BaseImageProcessor):
|
||||
return BatchFeature(data=data, tensor_type=return_tensors)
|
||||
|
||||
|
||||
# NOTE(review): the next two lines register the SAME config with AutoImageProcessor
# twice (2-arg vs 4-arg slow/fast form). This page is a diff whose +/- markers were
# stripped, so these are almost certainly the before/after pair of one edit — the
# 4-arg form is presumably the surviving variant; confirm against the upstream commit.
AutoImageProcessor.register(Qwen2_5_VLConfig, Qwen2_5_VLImageProcessor)
|
||||
AutoImageProcessor.register(Qwen2_5_VLConfig, None, Qwen2_5_VLImageProcessor, None)
|
||||
# Register the composite processor for this config with the Auto factory.
AutoProcessor.register(Qwen2_5_VLConfig, Qwen2_5_VLProcessor)
|
||||
|
||||
Reference in New Issue
Block a user