init

0	transformers/tests/models/aria/__init__.py	Normal file

320	transformers/tests/models/aria/test_image_processing_aria.py	Normal file
@@ -0,0 +1,320 @@
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import unittest

import numpy as np

from transformers.image_utils import ChannelDimension, PILImageResampling
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingTestMixin


if is_vision_available():
    from PIL import Image

    from transformers import AriaImageProcessor


if is_torch_available():
    import torch

class AriaImageProcessingTester:
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_images=1,
        min_resolution=30,
        max_resolution=40,
        size=None,
        max_image_size=980,
        min_image_size=336,
        split_resolutions=None,
        split_image=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_convert_rgb=True,
        resample=PILImageResampling.BICUBIC,
    ):
        self.size = size if size is not None else {"longest_edge": max_resolution}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_images = num_images
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.resample = resample
        self.max_image_size = max_image_size
        self.min_image_size = min_image_size
        self.split_resolutions = split_resolutions if split_resolutions is not None else [[980, 980]]
        self.split_image = split_image
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "max_image_size": self.max_image_size,
            "min_image_size": self.min_image_size,
            "split_resolutions": self.split_resolutions,
            "split_image": self.split_image,
            "do_convert_rgb": self.do_convert_rgb,
            "do_normalize": self.do_normalize,
            "resample": self.resample,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        This function computes the expected height and width when providing images to AriaImageProcessor,
        assuming do_resize is set to True. In that case, the expected size is the max image size.
        """
        return self.max_image_size, self.max_image_size

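    # A quick illustration (a sketch, not from the original file): since resizing always targets
    # max_image_size regardless of the input, a tester built with the defaults above predicts
    # 980x980 for every image:
    #
    #     tester = AriaImageProcessingTester(parent=None)
    #     tester.get_expected_values(None)  # -> (980, 980)
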
    def expected_output_image_shape(self, images):
        height, width = self.get_expected_values(images, batched=True)
        return self.num_channels, height, width

    def prepare_image_inputs(
        self,
        batch_size=None,
        min_resolution=None,
        max_resolution=None,
        num_channels=None,
        num_images=None,
        size_divisor=None,
        equal_resolution=False,
        numpify=False,
        torchify=False,
    ):
        """This function prepares a nested list of images (one inner list per sample): PIL images by
        default, numpy arrays if one specifies numpify=True, or PyTorch tensors if one specifies torchify=True.

        One can specify whether the images are of the same resolution or not.
        """
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        batch_size = batch_size if batch_size is not None else self.batch_size
        min_resolution = min_resolution if min_resolution is not None else self.min_resolution
        max_resolution = max_resolution if max_resolution is not None else self.max_resolution
        num_channels = num_channels if num_channels is not None else self.num_channels
        num_images = num_images if num_images is not None else self.num_images

        images_list = []
        for i in range(batch_size):
            images = []
            for j in range(num_images):
                if equal_resolution:
                    width = height = max_resolution
                else:
                    # To avoid getting image width/height 0
                    if size_divisor is not None:
                        # If `size_divisor` is defined, the image needs to have width/height >= `size_divisor`
                        min_resolution = max(size_divisor, min_resolution)
                    width, height = np.random.choice(np.arange(min_resolution, max_resolution), 2)
                images.append(np.random.randint(255, size=(num_channels, width, height), dtype=np.uint8))
            images_list.append(images)

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            images_list = [[Image.fromarray(np.moveaxis(image, 0, -1)) for image in images] for images in images_list]

        if torchify:
            images_list = [[torch.from_numpy(image) for image in images] for images in images_list]

        if numpify:
            # Numpy images are typically in channels last format
            images_list = [[image.transpose(1, 2, 0) for image in images] for images in images_list]

        return images_list


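# Illustrative usage of the tester above (a sketch, not part of the original tests): the batch is
# nested, one inner list of images per sample:
#
#     tester = AriaImageProcessingTester(parent=None, batch_size=2)
#     batch = tester.prepare_image_inputs(numpify=True)  # 2 samples x 1 image each
#     batch[0][0].shape                                  # channels-last, e.g. (33, 37, 3)
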
@require_torch
@require_vision
class AriaImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
    image_processing_class = AriaImageProcessor if is_vision_available() else None

    def setUp(self):
        super().setUp()
        self.image_processor_tester = AriaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))
        self.assertTrue(hasattr(image_processing, "max_image_size"))
        self.assertTrue(hasattr(image_processing, "min_image_size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "split_image"))

    def test_call_numpy(self):
        for image_processing_class in self.image_processor_list:
            # Initialize image_processing
            image_processing = image_processing_class(**self.image_processor_dict)
            # create random numpy tensors
            image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True)
            for sample_images in image_inputs:
                for image in sample_images:
                    self.assertIsInstance(image, np.ndarray)

            # Test not batched input
            encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
            expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]])
            self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape))

            # Test batched
            encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
            expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs)
            self.assertEqual(
                tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape)
            )

    def test_call_numpy_4_channels(self):
        # Aria always processes images as RGB, so it always returns images with 3 channels
        for image_processing_class in self.image_processor_list:
            # Initialize image_processing
            image_processor_dict = self.image_processor_dict
            image_processing = image_processing_class(**image_processor_dict)
            # create random numpy tensors
            image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True)

            for sample_images in image_inputs:
                for image in sample_images:
                    self.assertIsInstance(image, np.ndarray)

            # Test not batched input
            encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
            expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]])
            self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape))

            # Test batched
            encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
            expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs)
            self.assertEqual(
                tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape)
            )

    def test_call_pil(self):
        for image_processing_class in self.image_processor_list:
            # Initialize image_processing
            image_processing = image_processing_class(**self.image_processor_dict)
            # create random PIL images
            image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False)
            for images in image_inputs:
                for image in images:
                    self.assertIsInstance(image, Image.Image)

            # Test not batched input
            encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
            expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]])
            self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape))

            # Test batched
            encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
            expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs)
            self.assertEqual(
                tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape)
            )

    def test_call_pytorch(self):
        for image_processing_class in self.image_processor_list:
            # Initialize image_processing
            image_processing = image_processing_class(**self.image_processor_dict)
            # create random PyTorch tensors
            image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)

            for images in image_inputs:
                for image in images:
                    self.assertIsInstance(image, torch.Tensor)

            # Test not batched input
            encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
            expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]])
            self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape))

            # Test batched
            expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs)
            encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
            self.assertEqual(
                tuple(encoded_images.shape),
                (self.image_processor_tester.batch_size, *expected_output_image_shape),
            )

    def test_pad_for_patching(self):
        for image_processing_class in self.image_processor_list:
            if image_processing_class == self.fast_image_processing_class:
                numpify = False
                torchify = True
                input_data_format = image_processing_class.data_format
            else:
                numpify = True
                torchify = False
                input_data_format = ChannelDimension.LAST
            image_processing = image_processing_class(**self.image_processor_dict)
            # Create odd-sized images
            image_input = self.image_processor_tester.prepare_image_inputs(
                batch_size=1,
                max_resolution=400,
                num_images=1,
                equal_resolution=True,
                numpify=numpify,
                torchify=torchify,
            )[0][0]
            self.assertIn(image_input.shape, [(3, 400, 400), (400, 400, 3)])

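            # (The two checks below only pin down the spatial dims: whatever the channel layout,
            # `_pad_for_patching` should return the image padded out to exactly the target
            # (height, width) before patches are extracted.)
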
            # Test odd-width
            image_shape = (400, 601)
            encoded_images = image_processing._pad_for_patching(image_input, image_shape, input_data_format)
            encoded_image_shape = (
                encoded_images.shape[:-1] if input_data_format == ChannelDimension.LAST else encoded_images.shape[1:]
            )
            self.assertEqual(encoded_image_shape, image_shape)

            # Test odd-height
            image_shape = (503, 400)
            encoded_images = image_processing._pad_for_patching(image_input, image_shape, input_data_format)
            encoded_image_shape = (
                encoded_images.shape[:-1] if input_data_format == ChannelDimension.LAST else encoded_images.shape[1:]
            )
            self.assertEqual(encoded_image_shape, image_shape)

    def test_get_num_patches_without_images(self):
        for image_processing_class in self.image_processor_list:
            image_processing = image_processing_class(**self.image_processor_dict)
            num_patches = image_processing.get_number_of_image_patches(height=100, width=100, images_kwargs={})
            self.assertEqual(num_patches, 1)

            num_patches = image_processing.get_number_of_image_patches(
                height=300, width=500, images_kwargs={"split_image": True}
            )
            self.assertEqual(num_patches, 1)

            num_patches = image_processing.get_number_of_image_patches(
                height=100, width=100, images_kwargs={"split_image": True, "max_image_size": 200}
            )
            self.assertEqual(num_patches, 19)

516	transformers/tests/models/aria/test_modeling_aria.py	Normal file
@@ -0,0 +1,516 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Aria model."""

import unittest

import requests

from transformers import (
    AriaConfig,
    AriaForConditionalGeneration,
    AriaModel,
    AriaTextConfig,
    AutoProcessor,
    AutoTokenizer,
    BitsAndBytesConfig,
    is_torch_available,
    is_vision_available,
)
from transformers.models.idefics3 import Idefics3VisionConfig
from transformers.testing_utils import (
    Expectations,
    cleanup,
    require_bitsandbytes,
    require_torch,
    require_torch_large_accelerator,
    require_vision,
    slow,
    torch_device,
)

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor


if is_torch_available():
    import torch


if is_vision_available():
    from PIL import Image

# Used to be https://aria-vl.github.io/static/images/view.jpg, but that image was removed; llava-vl hosts the same one
IMAGE_OF_VIEW_URL = "https://llava-vl.github.io/static/images/view.jpg"


class AriaVisionText2TextModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=16,
        num_image_tokens=4,
        ignore_index=-100,
        image_token_index=9,
        projector_hidden_act="gelu",
        seq_length=7,
        vision_feature_select_strategy="default",
        vision_feature_layer=-1,
        text_config=AriaTextConfig(
            seq_length=7,
            is_training=True,
            use_input_mask=True,
            use_token_type_ids=False,
            use_labels=True,
            hidden_act="gelu",
            hidden_dropout_prob=0.1,
            attention_probs_dropout_prob=0.1,
            type_vocab_size=16,
            type_sequence_label_size=2,
            initializer_range=0.02,
            num_labels=3,
            num_choices=4,
            pad_token_id=1,
            hidden_size=32,
            intermediate_size=16,
            max_position_embeddings=60,
            model_type="aria_moe_lm",
            moe_intermediate_size=4,
            moe_num_experts=3,
            moe_topk=2,
            num_attention_heads=2,
            num_experts_per_tok=3,
            num_hidden_layers=2,
            num_key_value_heads=2,
            rope_theta=5000000,
            vocab_size=99,
            eos_token_id=2,
            head_dim=4,
        ),
        is_training=True,
        vision_config=Idefics3VisionConfig(
            image_size=16,
            patch_size=8,
            num_channels=3,
            is_training=True,
            hidden_size=32,
            projection_dim=4,
            num_hidden_layers=2,
            num_attention_heads=2,
            intermediate_size=4,
            dropout=0.1,
            attention_dropout=0.1,
            initializer_range=0.02,
        ),
    ):
        self.parent = parent
        self.ignore_index = ignore_index
        self.image_token_index = image_token_index
        self.projector_hidden_act = projector_hidden_act
        self.vision_feature_select_strategy = vision_feature_select_strategy
        self.vision_feature_layer = vision_feature_layer
        self.text_config = text_config
        self.vision_config = vision_config
        self.pad_token_id = text_config.pad_token_id
        self.eos_token_id = text_config.eos_token_id
        self.num_hidden_layers = text_config.num_hidden_layers
        self.vocab_size = text_config.vocab_size
        self.hidden_size = text_config.hidden_size
        self.num_attention_heads = text_config.num_attention_heads
        self.is_training = is_training

        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.num_image_tokens = num_image_tokens
        self.seq_length = seq_length + self.num_image_tokens
        self.projector_patch_to_query_dict = {
            vision_config.image_size**2 // vision_config.patch_size**2: vision_config.projection_dim
        }

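    # The dict built above evaluates to {16**2 // 8**2: 4} == {4: 4}: each image yields 4 vision
    # patches, which the projector maps to 4 query vectors (consistent with num_image_tokens=4).
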
    def get_config(self):
        return AriaConfig(
            text_config=self.text_config.to_dict(),
            vision_config=self.vision_config.to_dict(),
            ignore_index=self.ignore_index,
            image_token_index=self.image_token_index,
            projector_hidden_act=self.projector_hidden_act,
            vision_feature_select_strategy=self.vision_feature_select_strategy,
            vision_feature_layer=self.vision_feature_layer,
            eos_token_id=self.eos_token_id,
            projector_patch_to_query_dict=self.projector_patch_to_query_dict,
        )

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [
                self.batch_size,
                self.vision_config.num_channels,
                self.vision_config.image_size,
                self.vision_config.image_size,
            ]
        )
        config = self.get_config()

        return config, pixel_values

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size - 1) + 1
        attention_mask = input_ids.ne(1).to(torch_device)
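        # Replace any token id that randomly collided with the image placeholder by the pad id, then
        # mark the first num_image_tokens positions as image tokens so they line up with pixel_values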
        input_ids[input_ids == config.image_token_index] = self.pad_token_id
        input_ids[:, : self.num_image_tokens] = config.image_token_index
        inputs_dict = {
            "pixel_values": pixel_values,
            "input_ids": input_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict


@require_torch
class AriaForConditionalGenerationModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
    """
    Model tester for `AriaForConditionalGeneration`.
    """

    all_model_classes = (AriaModel, AriaForConditionalGeneration) if is_torch_available() else ()
    test_pruning = False
    test_head_masking = False
    test_torchscript = False
    _is_composite = True

    def setUp(self):
        self.model_tester = AriaVisionText2TextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AriaConfig, has_text_modality=False)

    @unittest.skip(reason="Unstable test")
    def test_initialization(self):
        pass


SKIP = False
torch_accelerator_module = getattr(torch, torch_device)
memory = 23  # skip on T4 / A10
if hasattr(torch_accelerator_module, "get_device_properties"):
    if torch_accelerator_module.get_device_properties(0).total_memory / 1024**3 < memory:
        SKIP = True

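# (A 24 GB A10 typically reports just under 23 GiB of total memory, so it falls under this gate
# together with 16 GB T4s, matching the "skip on T4 / A10" comment above.)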

@unittest.skipIf(SKIP, reason="A10 doesn't have enough GPU memory for these tests")
@require_torch
class AriaForConditionalGenerationIntegrationTest(unittest.TestCase):
    def setUp(self):
        self.processor = AutoProcessor.from_pretrained("rhymes-ai/Aria")
        cleanup(torch_device, gc_collect=True)

    def tearDown(self):
        cleanup(torch_device, gc_collect=True)

    @slow
    @require_torch_large_accelerator
    @require_bitsandbytes
    def test_small_model_integration_test(self):
        # Let's make sure we also exercise the preprocessing that replaces the image placeholder
        model = AriaForConditionalGeneration.from_pretrained(
            "rhymes-ai/Aria",
            quantization_config=BitsAndBytesConfig(load_in_4bit=True, llm_int8_skip_modules=["multihead_attn"]),
        )

        prompt = "<|img|>\nUSER: What are the things I should be cautious about when I visit this place?\nASSISTANT:"
        raw_image = Image.open(requests.get(IMAGE_OF_VIEW_URL, stream=True).raw)
        inputs = self.processor(images=raw_image, text=prompt, return_tensors="pt").to(model.device, model.dtype)

        non_img_tokens = [
            109, 3905, 2000, 93415, 4551, 1162, 901, 3894, 970, 2478, 1017, 19312, 2388, 1596, 1809, 970, 5449, 1235,
            3333, 93483, 109, 61081, 11984, 14800, 93415
        ]  # fmt: skip
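        # The processor expands the single "<|img|>" placeholder into 256 image-token ids (token id 9
        # for this checkpoint) in front of the text tokens, hence the [9] * 256 prefix below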
        EXPECTED_INPUT_IDS = torch.tensor([[9] * 256 + non_img_tokens]).to(inputs["input_ids"].device)
        self.assertTrue(torch.equal(inputs["input_ids"], EXPECTED_INPUT_IDS))

        output = model.generate(**inputs, max_new_tokens=20)
        decoded_output = self.processor.decode(output[0], skip_special_tokens=True)

        expected_output = Expectations(
            {
                (
                    "cuda",
                    None,
                ): "\nUSER: What are the things I should be cautious about when I visit this place?\nASSISTANT: When visiting this place, there are a few things one should be cautious about. Firstly,",
                (
                    "rocm",
                    (9, 5),
                ): "\n USER: What are the things I should be cautious about when I visit this place?\n ASSISTANT: When you visit this place, you should be cautious about the following things:\n\n- The",
            }
        ).get_expectation()
        self.assertEqual(decoded_output, expected_output)

    @slow
    @require_torch_large_accelerator
    @require_bitsandbytes
    def test_small_model_integration_test_llama_single(self):
        # Let's make sure we also exercise the preprocessing that replaces the image placeholder
        model_id = "rhymes-ai/Aria"

        model = AriaForConditionalGeneration.from_pretrained(
            model_id,
            quantization_config=BitsAndBytesConfig(load_in_4bit=True, llm_int8_skip_modules=["multihead_attn"]),
        )
        processor = AutoProcessor.from_pretrained(model_id)

        prompt = "USER: <|img|>\nWhat are the things I should be cautious about when I visit this place? ASSISTANT:"
        raw_image = Image.open(requests.get(IMAGE_OF_VIEW_URL, stream=True).raw)
        inputs = processor(images=raw_image, text=prompt, return_tensors="pt").to(model.device, model.dtype)

        output = model.generate(**inputs, max_new_tokens=90, do_sample=False)
        EXPECTED_DECODED_TEXT = Expectations(
            {
                ("cuda", (8, 0)): "USER: \n What are the things I should be cautious about when I visit this place? ASSISTANT: When visiting this beautiful location, it's important to be mindful of a few things to ensure both your safety and the preservation of the environment. Firstly, always be cautious when walking on the wooden pier, as it can be slippery, especially during or after rain. Secondly, be aware of the local wildlife and do not feed or disturb them. Lastly, respect the natural surroundings by not littering and sticking to",
                ("rocm", (9, 5)): "USER: \n What are the things I should be cautious about when I visit this place? ASSISTANT: \n\nWhen visiting this place, you should be cautious about the following:\n\n1. **Weather Conditions**: The weather can be unpredictable, so it's important to check the forecast and dress in layers. Sudden changes in weather can occur, so be prepared for rain or cold temperatures.\n\n2. **Safety on the Dock**: The dock may be slippery, especially when",
            }
        ).get_expectation()  # fmt: skip

        decoded_output = processor.decode(output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)
        self.assertEqual(
            decoded_output,
            EXPECTED_DECODED_TEXT,
            f"Expected: {repr(EXPECTED_DECODED_TEXT)}\nActual: {repr(decoded_output)}",
        )

    @slow
    @require_torch_large_accelerator
    @require_bitsandbytes
    def test_small_model_integration_test_llama_batched(self):
        # Let's make sure we also exercise the preprocessing that replaces the image placeholder
        model_id = "rhymes-ai/Aria"

        model = AriaForConditionalGeneration.from_pretrained(
            model_id,
            quantization_config=BitsAndBytesConfig(load_in_4bit=True, llm_int8_skip_modules=["multihead_attn"]),
        )
        processor = AutoProcessor.from_pretrained(model_id)

        prompts = [
            "USER: <|img|>\nWhat are the things I should be cautious about when I visit this place? What should I bring with me? ASSISTANT:",
            "USER: <|img|>\nWhat is this? ASSISTANT:",
        ]
        image1 = Image.open(requests.get(IMAGE_OF_VIEW_URL, stream=True).raw)
        image2 = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)

        inputs = processor(images=[image1, image2], text=prompts, return_tensors="pt", padding=True).to(
            model.device, model.dtype
        )

        output = model.generate(**inputs, max_new_tokens=20)

        EXPECTED_DECODED_TEXT = Expectations(
            {
                ("cuda", None): [
                    "USER: \nWhat are the things I should be cautious about when I visit this place? What should I bring with me? ASSISTANT: When visiting this place, which is a pier or dock extending over a body of water, you",
                    "USER: \nWhat is this? ASSISTANT: The image features two cats lying down on a pink couch. One cat is located on",
                ],
                ("rocm", (9, 5)): [
                    "USER: \n What are the things I should be cautious about when I visit this place? What should I bring with me? ASSISTANT: \n\nWhen visiting this place, you should be cautious about the weather conditions, as it",
                    "USER: \n What is this? ASSISTANT: This is a picture of two cats sleeping on a couch. USER: What is the color of",
                ],
            }
        ).get_expectation()

        decoded_output = processor.batch_decode(output, skip_special_tokens=True)
        self.assertEqual(decoded_output, EXPECTED_DECODED_TEXT)

    @slow
    @require_torch_large_accelerator
    @require_bitsandbytes
    def test_small_model_integration_test_batch(self):
        # Let's make sure we also exercise the preprocessing that replaces the image placeholder
        model = AriaForConditionalGeneration.from_pretrained(
            "rhymes-ai/Aria",
            quantization_config=BitsAndBytesConfig(load_in_4bit=True, llm_int8_skip_modules=["multihead_attn"]),
        )
        # The first prompt is longer in terms of text; each sample has one image, so the second
        # prompt gets padded on the text side to match.
        prompts = [
            "USER: <|img|>\nWhat are the things I should be cautious about when I visit this place? What should I bring with me?\nASSISTANT:",
            "USER: <|img|>\nWhat is this?\nASSISTANT:",
        ]
        image1 = Image.open(requests.get(IMAGE_OF_VIEW_URL, stream=True).raw)
        image2 = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)

        inputs = self.processor(images=[image1, image2], text=prompts, return_tensors="pt", padding=True).to(
            model.device, model.dtype
        )

        output = model.generate(**inputs, max_new_tokens=20)

        EXPECTED_DECODED_TEXT = Expectations({
            ("cuda", None): [
                'USER: \nWhat are the things I should be cautious about when I visit this place? What should I bring with me?\nASSISTANT: When visiting this place, there are a few things to be cautious about and items to bring.',
                'USER: \nWhat is this?\nASSISTANT: Cats',
            ],
            ("rocm", (9, 5)): [
                'USER: \n What are the things I should be cautious about when I visit this place? What should I bring with me?\n ASSISTANT: \n\nWhen visiting this place, you should be cautious about the following:\n\n-',
                'USER: \n What is this?\n ASSISTANT: This is a picture of two cats sleeping on a couch. The couch is red, and the cats',
            ],
        }).get_expectation()  # fmt: skip

        decoded_output = self.processor.batch_decode(output, skip_special_tokens=True)
        self.assertEqual(decoded_output, EXPECTED_DECODED_TEXT)

    @slow
    @require_torch_large_accelerator
    @require_bitsandbytes
    def test_small_model_integration_test_llama_batched_regression(self):
        # Let's make sure we also exercise the preprocessing that replaces the image placeholder
        model_id = "rhymes-ai/Aria"

        # Multi-image & multi-prompt (e.g. 3 images and 2 prompts) now fails with SDPA; this tests that "eager" works as before
        model = AriaForConditionalGeneration.from_pretrained(
            model_id,
            quantization_config=BitsAndBytesConfig(load_in_4bit=True, llm_int8_skip_modules=["multihead_attn"]),
        )
        processor = AutoProcessor.from_pretrained(model_id, pad_token="<pad>")

        prompts = [
            "USER: <|img|>\nWhat are the things I should be cautious about when I visit this place? What should I bring with me?\nASSISTANT:",
            "USER: <|img|>\nWhat is this?\nASSISTANT: Two cats lying on a bed!\nUSER: <|img|>\nAnd this?\nASSISTANT:",
        ]
        image1 = Image.open(requests.get(IMAGE_OF_VIEW_URL, stream=True).raw)
        image2 = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)

        inputs = processor(images=[image1, image2, image1], text=prompts, return_tensors="pt", padding=True)
        inputs = inputs.to(model.device, model.dtype)

        output = model.generate(**inputs, max_new_tokens=20)

        EXPECTED_DECODED_TEXT = Expectations({
            ("cuda", None): ['USER: \nWhat are the things I should be cautious about when I visit this place? What should I bring with me?\nASSISTANT: When visiting this place, which appears to be a dock or pier extending over a body of water', 'USER: \nWhat is this?\nASSISTANT: Two cats lying on a bed!\nUSER: \nAnd this?\nASSISTANT: A cat sleeping on a bed.'],
            ("rocm", (9, 5)): ['USER: \n What are the things I should be cautious about when I visit this place? What should I bring with me?\n ASSISTANT: \n\nWhen visiting this place, you should be cautious about the weather conditions, as it', 'USER: \n What is this?\n ASSISTANT: Two cats lying on a bed!\n USER: \n And this?\n ASSISTANT: A serene lake scene with a wooden dock extending into the water.\n USER: \n']
        }).get_expectation()  # fmt: skip

        decoded_output = processor.batch_decode(output, skip_special_tokens=True)
        self.assertEqual(decoded_output, EXPECTED_DECODED_TEXT)

    @slow
    @require_torch_large_accelerator
    @require_vision
    @require_bitsandbytes
    def test_batched_generation(self):
        # Skip multihead_attn for 4bit because MHA will read the original weight without dequantizing it.
        # See https://github.com/huggingface/transformers/pull/37444#discussion_r2045852538.
        model = AriaForConditionalGeneration.from_pretrained(
            "rhymes-ai/Aria",
            quantization_config=BitsAndBytesConfig(load_in_4bit=True, llm_int8_skip_modules=["multihead_attn"]),
        )
        processor = AutoProcessor.from_pretrained("rhymes-ai/Aria")

        prompt1 = "<image>\n<image>\nUSER: What's the difference of two images?\nASSISTANT:"
        prompt2 = "<image>\nUSER: Describe the image.\nASSISTANT:"
        prompt3 = "<image>\nUSER: Describe the image.\nASSISTANT:"
        url1 = "https://images.unsplash.com/photo-1552053831-71594a27632d?q=80&w=3062&auto=format&fit=crop&ixlib=rb-4.0.3&ixid=M3wxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8fA%3D%3D"
        url2 = "https://images.unsplash.com/photo-1617258683320-61900b281ced?q=80&w=3087&auto=format&fit=crop&ixlib=rb-4.0.3&ixid=M3wxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8fA%3D%3D"
        image1 = Image.open(requests.get(url1, stream=True).raw)
        image2 = Image.open(requests.get(url2, stream=True).raw)

        # Create inputs
        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "image"},
                    {"type": "text", "text": prompt1},
                    {"type": "image"},
                    {"type": "text", "text": prompt2},
                ],
            },
            {
                "role": "user",
                "content": [
                    {"type": "image"},
                    {"type": "text", "text": prompt3},
                ],
            },
        ]

        prompts = [processor.apply_chat_template([message], add_generation_prompt=True) for message in messages]
        images = [[image1, image2], [image2]]
        inputs = processor(text=prompts, images=images, padding=True, return_tensors="pt").to(
            device=model.device, dtype=model.dtype
        )

        EXPECTED_OUTPUTS = Expectations(
            {
                ("cpu", None): [
                    "<|im_start|>user\n<fim_prefix><fim_suffix> <image>\n <image>\n USER: What's the difference of two images?\n ASSISTANT:<fim_prefix><fim_suffix> <image>\n USER: Describe the image.\n ASSISTANT:<|im_end|>\n <|im_start|>assistant\n The first image features a cute, light-colored puppy sitting on a paved surface with",
                    "<|im_start|>user\n<fim_prefix><fim_suffix> <image>\n USER: Describe the image.\n ASSISTANT:<|im_end|>\n <|im_start|>assistant\n The image shows a young alpaca standing on a grassy hill. The alpaca has",
                ],
                ("cuda", None): [
                    "<|im_start|>user\n<fim_prefix><fim_suffix> <image>\n <image>\n USER: What's the difference of two images?\n ASSISTANT:<fim_prefix><fim_suffix> <image>\n USER: Describe the image.\n ASSISTANT:<|im_end|>\n <|im_start|>assistant\n The first image features a cute, light-colored puppy sitting on a paved surface with",
                    "<|im_start|>user\n<fim_prefix><fim_suffix> <image>\n USER: Describe the image.\n ASSISTANT:<|im_end|>\n <|im_start|>assistant\n The image shows a young alpaca standing on a patch of ground with some dry grass. The",
                ],
                ("xpu", 3): [
                    "<|im_start|>user\n<fim_prefix><fim_suffix> <image>\n <image>\n USER: What's the difference of two images?\n ASSISTANT:<fim_prefix><fim_suffix> <image>\n USER: Describe the image.\n ASSISTANT:<|im_end|>\n <|im_start|>assistant\n The first image features a cute, light-colored puppy sitting on a paved surface with",
                    "<|im_start|>user\n<fim_prefix><fim_suffix> <image>\n USER: Describe the image.\n ASSISTANT:<|im_end|>\n <|im_start|>assistant\n The image shows a young alpaca standing on a patch of ground with some dry grass. The",
                ],
                ("rocm", (9, 5)): [
                    "<|im_start|>user\n<fim_prefix><fim_suffix> <image>\n <image>\n USER: What's the difference of two images?\n ASSISTANT:<fim_prefix><fim_suffix> <image>\n USER: Describe the image.\n ASSISTANT:<|im_end|>\n <|im_start|>assistant\n The first image shows a cute golden retriever puppy sitting on a paved surface with a stick",
                    "<|im_start|>user\n<fim_prefix><fim_suffix> <image>\n USER: Describe the image.\n ASSISTANT:<|im_end|>\n <|im_start|>assistant\n The image shows a young llama standing on a patch of ground with some dry grass and dirt. The",
                ],
            }
        )  # fmt: skip
        EXPECTED_OUTPUT = EXPECTED_OUTPUTS.get_expectation()
        generate_ids = model.generate(**inputs, max_new_tokens=20)
        outputs = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        self.assertListEqual(outputs, EXPECTED_OUTPUT)

    def test_tokenizer_integration(self):
        model_id = "rhymes-ai/Aria"
        slow_tokenizer = AutoTokenizer.from_pretrained(
            model_id, bos_token="<|startoftext|>", eos_token="<|endoftext|>", use_fast=False
        )
        slow_tokenizer.add_tokens("<image>", True)

        fast_tokenizer = AutoTokenizer.from_pretrained(
            model_id,
            bos_token="<|startoftext|>",
            eos_token="<|endoftext|>",
            from_slow=True,
            legacy=False,
        )
        fast_tokenizer.add_tokens("<image>", True)

        prompt = "<|startoftext|><|im_start|>system\nAnswer the questions.<|im_end|><|im_start|>user\n<image>\nWhat is shown in this image?<|im_end|>"
        EXPECTED_OUTPUT = ['<|startoftext|>', '<', '|', 'im', '_', 'start', '|', '>', 'system', '\n', 'Answer', '▁the', '▁questions', '.<', '|', 'im', '_', 'end', '|', '><', '|', 'im', '_', 'start', '|', '>', 'user', '\n', '<image>', '\n', 'What', '▁is', '▁shown', '▁in', '▁this', '▁image', '?', '<', '|', 'im', '_', 'end', '|', '>']  # fmt: skip
        self.assertEqual(slow_tokenizer.tokenize(prompt), EXPECTED_OUTPUT)
        self.assertEqual(fast_tokenizer.tokenize(prompt), EXPECTED_OUTPUT)

    @slow
    @require_torch_large_accelerator
    @require_bitsandbytes
    def test_generation_no_images(self):
        model_id = "rhymes-ai/Aria"
        model = AriaForConditionalGeneration.from_pretrained(
            model_id,
            quantization_config=BitsAndBytesConfig(load_in_4bit=True, llm_int8_skip_modules=["multihead_attn"]),
        )
        processor = AutoProcessor.from_pretrained(model_id)
        assert model.device.type == "cuda", "This test is only supported on CUDA"  # TODO: remove this
        # Prepare inputs with no images
        inputs = processor(text="Hello, I am", return_tensors="pt").to(torch_device)

        # Make sure that `generate` works
        _ = model.generate(**inputs, max_new_tokens=20)

310	transformers/tests/models/aria/test_processing_aria.py	Normal file
@@ -0,0 +1,310 @@
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import shutil
import tempfile
import unittest

import numpy as np

from transformers import AriaProcessor
from transformers.image_utils import load_image
from transformers.models.auto.processing_auto import AutoProcessor
from transformers.testing_utils import require_torch, require_vision

from ...test_processing_common import ProcessorTesterMixin, url_to_local_path


@require_torch
@require_vision
class AriaProcessorTest(ProcessorTesterMixin, unittest.TestCase):
    processor_class = AriaProcessor

    @classmethod
    def setUpClass(cls):
        cls.tmpdirname = tempfile.mkdtemp()
        processor = AriaProcessor.from_pretrained("m-ric/Aria_hf_2", size_conversion={490: 2, 980: 2})
        processor.save_pretrained(cls.tmpdirname)
        cls.image1 = load_image(
            url_to_local_path(
                "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
            )
        )
        cls.image2 = load_image(
            url_to_local_path("https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg")
        )
        cls.image3 = load_image(
            url_to_local_path(
                "https://thumbs.dreamstime.com/b/golden-gate-bridge-san-francisco-purple-flowers-california-echium-candicans-36805947.jpg"
            )
        )
        cls.bos_token = "<|im_start|>"
        cls.eos_token = "<|im_end|>"

        cls.image_token = processor.tokenizer.image_token
        cls.fake_image_token = "o"
        cls.global_img_token = "<|img|>"

        cls.bos_token_id = processor.tokenizer.convert_tokens_to_ids(cls.bos_token)
        cls.eos_token_id = processor.tokenizer.convert_tokens_to_ids(cls.eos_token)

        cls.image_token_id = processor.tokenizer.convert_tokens_to_ids(cls.image_token)
        cls.fake_image_token_id = processor.tokenizer.convert_tokens_to_ids(cls.fake_image_token)
        cls.global_img_tokens_id = processor.tokenizer(cls.global_img_token, add_special_tokens=False)["input_ids"]
        cls.padding_token_id = processor.tokenizer.pad_token_id
        cls.image_seq_len = 2

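    # Note: size_conversion={490: 2, 980: 2} shrinks the per-image token count to 2 for both
    # supported resolutions, which is why image_seq_len is hard-coded to 2 above (the released
    # checkpoint uses much larger values).
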
    @staticmethod
    def prepare_processor_dict():
        return {
            "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}{% elif message['content'] is iterable %}{% for item in message['content'] %}{% if item['type'] == 'text' %}{{ item['text'] }}{% elif item['type'] == 'image' %}<fim_prefix><|img|><fim_suffix>{% endif %}{% endfor %}{% endif %}<|im_end|>\n{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}",
            "size_conversion": {490: 2, 980: 2},
        }  # fmt: skip

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs)

    @classmethod
    def tearDownClass(cls):
        cls.image1.close()
        cls.image2.close()
        cls.image3.close()
        shutil.rmtree(cls.tmpdirname, ignore_errors=True)

    # Copied from tests.models.llava.test_processing_llava.LlavaProcessorTest.test_get_num_vision_tokens
    def test_get_num_vision_tokens(self):
        """Tests general functionality of the helper used internally in vLLM."""

        processor = self.get_processor()

        output = processor._get_num_multimodal_tokens(image_sizes=[(100, 100), (300, 100), (500, 30)])
        self.assertTrue("num_image_tokens" in output)
        self.assertEqual(len(output["num_image_tokens"]), 3)

        self.assertTrue("num_image_patches" in output)
        self.assertEqual(len(output["num_image_patches"]), 3)

    def test_process_interleaved_images_prompts_image_splitting(self):
        processor = self.get_processor()
        processor.image_processor.split_image = True

        # Test that a single image is processed correctly
        inputs = processor(images=self.image1, text="Ok<|img|>", images_kwargs={"split_image": True})
        self.assertEqual(np.array(inputs["pixel_values"]).shape, (2, 3, 980, 980))
        self.assertEqual(np.array(inputs["pixel_mask"]).shape, (2, 980, 980))

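    # (With splitting enabled, this particular image yields two 980x980 patches, hence the leading
    # dimension of 2 in both shapes above; the exact patch count depends on the image's aspect
    # ratio and the configured split_resolutions.)
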
    def test_process_interleaved_images_prompts_no_image_splitting(self):
        processor = self.get_processor()
        processor.image_processor.split_image = False

        # Test that a single image is processed correctly
        inputs = processor(images=self.image1, text="Ok<|img|>")
        image1_expected_size = (980, 980)
        self.assertEqual(np.array(inputs["pixel_values"]).shape, (1, 3, *image1_expected_size))
        self.assertEqual(np.array(inputs["pixel_mask"]).shape, (1, *image1_expected_size))

        # Test a single sample with image and text
        image_str = "<|img|>"
        text_str = "In this image, we see"
        text = image_str + text_str
        inputs = processor(text=text, images=self.image1)

        # fmt: off
        tokenized_sentence = processor.tokenizer(text_str, add_special_tokens=False)

        expected_input_ids = [[self.image_token_id] * self.image_seq_len + tokenized_sentence["input_ids"]]
        # self.assertEqual(len(inputs["input_ids"]), len(expected_input_ids))

        self.assertEqual(inputs["input_ids"], expected_input_ids)
        self.assertEqual(inputs["attention_mask"], [[1] * len(expected_input_ids[0])])
        self.assertEqual(np.array(inputs["pixel_values"]).shape, (1, 3, *image1_expected_size))
        self.assertEqual(np.array(inputs["pixel_mask"]).shape, (1, *image1_expected_size))
        # fmt: on

        # Test that batch is correctly processed
        image_str = "<|img|>"
        text_str_1 = "In this image, we see"
        text_str_2 = "In this image, we see"

        text = [
            image_str + text_str_1,
            image_str + image_str + text_str_2,
        ]
        images = [[self.image1], [self.image2, self.image3]]

        inputs = processor(text=text, images=images, padding=True)

        # fmt: off
        tokenized_sentence_1 = processor.tokenizer(text_str_1, add_special_tokens=False)
        tokenized_sentence_2 = processor.tokenizer(text_str_2, add_special_tokens=False)

        image_tokens = [self.image_token_id] * self.image_seq_len
        expected_input_ids_1 = image_tokens + tokenized_sentence_1["input_ids"]
        expected_input_ids_2 = 2 * image_tokens + tokenized_sentence_2["input_ids"]

        # Pad the first input to match the second input
        pad_len = len(expected_input_ids_2) - len(expected_input_ids_1)

        expected_attention_mask = [[0] * pad_len + [1] * len(expected_input_ids_1), [1] * (len(expected_input_ids_2))]

        self.assertEqual(
            inputs["attention_mask"],
            expected_attention_mask
        )
        self.assertEqual(np.array(inputs["pixel_values"]).shape, (3, 3, 980, 980))
        self.assertEqual(np.array(inputs["pixel_mask"]).shape, (3, 980, 980))
        # fmt: on

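    # Worked example of the padding above: with image_seq_len=2, sample 1 is 2 image ids plus the
    # text ids while sample 2 is 4 image ids plus the same text ids, so pad_len == 2 and the first
    # sample's attention mask starts with two zeros (left padding).
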
    def test_non_nested_images_with_batched_text(self):
        processor = self.get_processor()
        processor.image_processor.split_image = False

        image_str = "<|img|>"
        text_str_1 = "In this image, we see"
        text_str_2 = "In this image, we see"

        text = [
            image_str + text_str_1,
            image_str + image_str + text_str_2,
        ]
        images = [self.image1, self.image2, self.image3]

        inputs = processor(text=text, images=images, padding=True)

        self.assertEqual(np.array(inputs["pixel_values"]).shape, (3, 3, 980, 980))
        self.assertEqual(np.array(inputs["pixel_mask"]).shape, (3, 980, 980))

    def test_apply_chat_template(self):
        # Message content mixes typed image entries, typed text entries, and plain strings
        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": "What do these images show?"},
                    {"type": "image"},
                    {"type": "image"},
                    "What do these images show?",
                ],
            },
            {
                "role": "assistant",
                "content": [
                    {
                        "type": "text",
                        "text": "The first image shows the statue of Liberty in New York. The second image picture depicts Idefix, the dog of Obelix in Asterix and Obelix.",
                    }
                ],
            },
            {"role": "user", "content": [{"type": "text", "text": "And who is that?"}]},
        ]
        processor = self.get_processor()
        # Make sure the fake image-wrapper tokens are added correctly around each image
        rendered = processor.apply_chat_template(messages, add_generation_prompt=True)

        expected_rendered = """<|im_start|>user
What do these images show?<fim_prefix><|img|><fim_suffix><fim_prefix><|img|><fim_suffix><|im_end|>
<|im_start|>assistant
The first image shows the statue of Liberty in New York. The second image picture depicts Idefix, the dog of Obelix in Asterix and Obelix.<|im_end|>
<|im_start|>user
And who is that?<|im_end|>
<|im_start|>assistant
"""
        self.assertEqual(rendered, expected_rendered)

    def test_image_chat_template_accepts_processing_kwargs(self):
        processor = self.get_processor()
        if processor.chat_template is None:
            self.skipTest("Processor has no chat template")

        messages = [
            [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "What is shown in this image?"},
                    ],
                },
            ]
        ]

        formatted_prompt_tokenized = processor.apply_chat_template(
            messages,
            add_generation_prompt=True,
            tokenize=True,
            padding="max_length",
            max_length=50,
        )
        self.assertEqual(len(formatted_prompt_tokenized[0]), 50)

        formatted_prompt_tokenized = processor.apply_chat_template(
            messages,
            add_generation_prompt=True,
            tokenize=True,
            truncation=True,
            max_length=5,
        )
        self.assertEqual(len(formatted_prompt_tokenized[0]), 5)

        # Now test the ability to return dict
        messages[0][0]["content"].append(
            {
                "type": "image",
                "url": url_to_local_path(
                    "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
                ),
            }
        )
        out_dict = processor.apply_chat_template(
            messages,
            add_generation_prompt=True,
            tokenize=True,
            return_dict=True,
            max_image_size=980,
            return_tensors="np",
        )
        self.assertListEqual(list(out_dict[self.images_input_name].shape), [1, 3, 980, 980])

    def test_special_mm_token_truncation(self):
        """Tests that special vision tokens do not get truncated when `truncation=True` is set."""

        processor = self.get_processor()

        input_str = self.prepare_text_inputs(batch_size=2, modalities="image")
        image_input = self.prepare_image_inputs(batch_size=2)

        _ = processor(
            text=input_str,
            images=image_input,
            return_tensors="pt",
            truncation=None,
            padding=True,
        )

        with self.assertRaises(ValueError):
            _ = processor(
                text=input_str,
                images=image_input,
                return_tensors="pt",
                truncation=True,
                padding=True,
                max_length=3,
            )