2025-10-09 16:47:16 +08:00
parent c8feb4deb5
commit e27e3f16bb
5248 changed files with 1778505 additions and 0 deletions


@@ -0,0 +1,177 @@
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers.image_utils import SizeDict
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available
from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import Ovis2ImageProcessor
if is_torchvision_available():
from transformers import Ovis2ImageProcessorFast
class Ovis2ImageProcessingTester(unittest.TestCase):
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
image_size=18,
min_resolution=30,
max_resolution=400,
do_resize=True,
size=None,
do_normalize=True,
do_pad=False,
image_mean=[0.48145466, 0.4578275, 0.40821073],
image_std=[0.26862954, 0.26130258, 0.27577711],
do_convert_rgb=True,
):
super().__init__()
size = size if size is not None else {"height": 20, "width": 20}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.do_pad = do_pad
self.do_convert_rgb = do_convert_rgb
def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
"do_pad": self.do_pad,
}
def expected_output_image_shape(self, images):
return self.num_channels, self.size["height"], self.size["width"]
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
@require_torch
@require_vision
class Ovis2ImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = Ovis2ImageProcessor if is_vision_available() else None
fast_image_processing_class = Ovis2ImageProcessorFast if is_torchvision_available() else None
def setUp(self):
super().setUp()
self.image_processor_tester = Ovis2ImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processor, "do_resize"))
self.assertTrue(hasattr(image_processor, "size"))
self.assertTrue(hasattr(image_processor, "do_normalize"))
self.assertTrue(hasattr(image_processor, "image_mean"))
self.assertTrue(hasattr(image_processor, "image_std"))
self.assertTrue(hasattr(image_processor, "do_convert_rgb"))
def test_slow_fast_equivalence_crop_to_patches(self):
dummy_image = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)[0]
image_processor_slow = self.image_processing_class(**self.image_processor_dict, crop_to_patches=True)
image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict, crop_to_patches=True)
encoding_slow = image_processor_slow(dummy_image, return_tensors="pt")
encoding_fast = image_processor_fast(dummy_image, return_tensors="pt")
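        # The slow (PIL/NumPy) and fast (torchvision) resize paths can use slightly
        # different interpolation kernels, so individual pixels may differ while the
        # outputs stay close overall: hence the loose elementwise atol below, paired
        # with a much tighter bound on the mean absolute difference.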
# torch.testing.assert_close(encoding_slow.num_patches, encoding_fast.num_patches)
self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1))
self.assertLessEqual(
torch.mean(torch.abs(encoding_slow.pixel_values - encoding_fast.pixel_values)).item(), 1e-3
)
def test_slow_fast_equivalence_batched_crop_to_patches(self):
        # Prepare image inputs so that we have two groups of images with equal resolution,
        # with a group of images of different resolutions in between
dummy_images = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, torchify=True)
dummy_images += self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
dummy_images += self.image_processor_tester.prepare_image_inputs(equal_resolution=True, torchify=True)
image_processor_slow = self.image_processing_class(**self.image_processor_dict, crop_to_patches=True)
image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict, crop_to_patches=True)
encoding_slow = image_processor_slow(dummy_images, return_tensors="pt")
encoding_fast = image_processor_fast(dummy_images, return_tensors="pt")
# torch.testing.assert_close(encoding_slow.num_patches, encoding_fast.num_patches)
self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1))
self.assertLessEqual(
torch.mean(torch.abs(encoding_slow.pixel_values - encoding_fast.pixel_values)).item(), 1e-3
)
def test_crop_to_patches(self):
# test slow image processor
image_processor = self.image_processor_list[0](**self.image_processor_dict)
image = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, numpify=True)[0]
processed_images, grid = image_processor.crop_image_to_patches(
image,
min_patches=1,
max_patches=6,
patch_size={"height": 20, "width": 20},
)
self.assertEqual(len(processed_images), 5)
self.assertEqual(processed_images[0].shape[:2], (20, 20))
self.assertEqual(len(grid), 2) # (row, col)
# test fast image processor (process batch)
image_processor = self.image_processor_list[1](**self.image_processor_dict)
image = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, torchify=True)[0]
processed_images, grid = image_processor.crop_image_to_patches(
image.unsqueeze(0),
min_patches=1,
max_patches=6,
patch_size=SizeDict(height=20, width=20),
)
self.assertEqual(len(processed_images[0]), 5)
self.assertEqual(processed_images.shape[-2:], (20, 20))
self.assertEqual(len(grid[0]), 2)
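
# Illustrative sketch, not the library implementation: InternVL/Ovis2-style
# crop-to-patches typically picks the tiling grid whose tile count falls in
# [min_patches, max_patches] and whose aspect ratio best matches the image,
# then appends a global thumbnail. The helper below is a hypothetical
# reconstruction of that selection logic, for intuition only.
def _best_grid(width: int, height: int, min_patches: int = 1, max_patches: int = 6) -> tuple[int, int]:
    candidates = [
        (rows, cols)
        for rows in range(1, max_patches + 1)
        for cols in range(1, max_patches + 1)
        if min_patches <= rows * cols <= max_patches
    ]
    image_ratio = width / height
    # Prefer the grid whose cols/rows ratio is closest to the image ratio;
    # break ties in favor of more tiles (finer coverage).
    return min(candidates, key=lambda rc: (abs(rc[1] / rc[0] - image_ratio), -rc[0] * rc[1]))

# e.g. a square image yields (2, 2) -> 4 tiles (+1 thumbnail = 5 patches),
# matching the grid and patch counts asserted in test_crop_to_patches above.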


@@ -0,0 +1,384 @@
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import requests
from transformers import (
AutoProcessor,
Ovis2Config,
Ovis2ForConditionalGeneration,
Ovis2Model,
is_torch_available,
is_vision_available,
)
from transformers.testing_utils import (
cleanup,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
ModelTesterMixin,
floats_tensor,
ids_tensor,
)
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
class Ovis2VisionText2TextModelTester:
def __init__(
self,
parent,
seq_length=7,
text_config={
"model_type": "qwen2",
"seq_length": 7,
"is_training": True,
"use_labels": True,
"vocab_size": 99,
"hidden_size": 64,
"num_hidden_layers": 2,
"num_attention_heads": 4,
"num_key_value_heads": 4,
"intermediate_size": 54,
"hidden_act": "gelu",
"max_position_embeddings": 580,
"initializer_range": 0.02,
"num_labels": 3,
"pad_token_id": 0,
},
is_training=True,
vision_config={
"image_size": 32,
"patch_size": 8,
"num_channels": 3,
"hidden_size": 64,
"vocab_size": 99,
"num_hidden_layers": 2,
"num_attention_heads": 4,
"intermediate_size": 54,
"attention_dropout": 0.0,
"hidden_act": "silu",
"qkv_bias": False,
"hidden_stride": 2,
"tokenize_function": "softmax",
},
image_token_id=1,
visual_indicator_token_ids=[],
vocab_size=99,
hidden_size=64,
ignore_id=-100,
):
self.parent = parent
self.text_config = text_config
self.vision_config = vision_config
self.image_token_id = image_token_id
self.visual_indicator_token_ids = visual_indicator_token_ids
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.image_seq_length = (
vision_config["image_size"] // (vision_config["patch_size"] * vision_config["hidden_stride"])
) ** 2
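        # With the defaults above: (32 // (8 * 2)) ** 2 = 2 ** 2 = 4 image tokens per image.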
self.seq_length = seq_length + self.image_seq_length
self.is_training = is_training
self.num_attention_heads = text_config["num_attention_heads"]
self.num_hidden_layers = text_config["num_hidden_layers"]
self.pad_token_id = text_config["pad_token_id"]
self.ignore_id = ignore_id
self.batch_size = 3
self.num_channels = 3
def get_config(self):
return Ovis2Config(
text_config=self.text_config,
vision_config=self.vision_config,
image_token_id=self.image_token_id,
visual_indicator_token_ids=self.visual_indicator_token_ids,
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
)
def prepare_config_and_inputs(self):
pixel_values = floats_tensor(
[
self.batch_size,
self.vision_config["num_channels"],
self.vision_config["image_size"],
self.vision_config["image_size"],
]
)
config = self.get_config()
return config, pixel_values
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values = config_and_inputs
vocab_range = self.vocab_size - 2
input_ids = ids_tensor([self.batch_size, self.seq_length], vocab_range) + 2
input_ids[:, : self.image_seq_length] = config.image_token_id
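        # The first image_seq_length positions stand in for the already-expanded image
        # placeholder tokens, which the model swaps for visual embeddings.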
attention_mask = torch.ones(input_ids.shape, dtype=torch.long).to(torch_device)
labels = torch.zeros((self.batch_size, self.seq_length), dtype=torch.long, device=torch_device)
labels[:, : self.image_seq_length] = self.ignore_id
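        # Image-token positions are excluded from the loss via ignore_id (-100,
        # the default ignore_index of PyTorch's cross-entropy loss).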
inputs_dict = {
"pixel_values": pixel_values,
"input_ids": input_ids,
"attention_mask": attention_mask,
"labels": labels,
}
return config, inputs_dict
@require_torch
class Ovis2ModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
"""
Model tester for `Ovis2ForConditionalGeneration`.
"""
all_model_classes = (
(
Ovis2Model,
Ovis2ForConditionalGeneration,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = {"image-text-to-text": Ovis2ForConditionalGeneration} if is_torch_available() else {}
_is_composite = True
test_pruning = False
test_torchscript = False
test_head_masking = False
def setUp(self):
self.model_tester = Ovis2VisionText2TextModelTester(self)
self.config_tester = ConfigTester(self, config_class=Ovis2Config, has_text_modality=False)
def test_config(self):
self.config_tester.run_common_tests()
def test_inputs_embeds(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = self._prepare_for_class(inputs_dict, model_class)
input_ids = inputs["input_ids"]
del inputs["input_ids"]
del inputs["pixel_values"]
wte = model.get_input_embeddings()
inputs["inputs_embeds"] = wte(input_ids)
with torch.no_grad():
model(**inputs)
    # Overwrite the inputs_embeds tests: for LVLMs we need to delete `pixel_values`,
    # while some other models require `pixel_values` to be present
def test_inputs_embeds_matches_input_ids(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = self._prepare_for_class(inputs_dict, model_class)
input_ids = inputs["input_ids"]
del inputs["input_ids"]
del inputs["pixel_values"]
inputs_embeds = model.get_input_embeddings()(input_ids)
with torch.no_grad():
out_ids = model(input_ids=input_ids, **inputs)[0]
out_embeds = model(inputs_embeds=inputs_embeds, **inputs)[0]
torch.testing.assert_close(out_embeds, out_ids)
@require_torch
@slow
class Ovis2IntegrationTest(unittest.TestCase):
def setUp(self):
self.processor = AutoProcessor.from_pretrained(
"thisisiron/Ovis2-2B-hf",
)
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
self.image = Image.open(requests.get(url, stream=True).raw)
self.prompt_image = ""
self.messages = [
{
"role": "user",
"content": [
{"type": "image"},
{"type": "text", "text": "What do you see in this image?"},
],
}
]
self.text = self.processor.apply_chat_template(self.messages, add_generation_prompt=True, tokenize=False)
def tearDown(self):
cleanup(torch_device, gc_collect=True)
def test_small_model_integration_test(self):
model = Ovis2ForConditionalGeneration.from_pretrained(
"thisisiron/Ovis2-2B-hf", dtype="bfloat16", device_map=torch_device
)
inputs = self.processor(images=self.image, text=self.text, return_tensors="pt").to(
torch_device, torch.bfloat16
)
self.assertTrue(inputs.input_ids.shape[1] == 1314) # should expand num-image-tokens times
self.assertTrue(inputs.pixel_values.shape == torch.Size([5, 3, 448, 448]))
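        # The 5 pixel_values entries for a single image suggest 4 cropped tiles plus
        # one global thumbnail, each resized to the model's 448x448 input resolution.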
inputs = inputs.to(torch_device)
output = model.generate(**inputs, max_new_tokens=64)
EXPECTED_DECODED_TEXT = 'system\nYou are a helpful assistant.\nuser\n\nWhat do you see in this image?\nassistant\nI see two cats lying on a pink blanket. There are also two remote controls on the blanket.' # fmt: skip
self.assertEqual(
self.processor.decode(output[0], skip_special_tokens=True),
EXPECTED_DECODED_TEXT,
)
def test_small_model_integration_test_batch(self):
model = Ovis2ForConditionalGeneration.from_pretrained(
"thisisiron/Ovis2-2B-hf", dtype="bfloat16", device_map=torch_device
)
inputs = self.processor(
text=[self.text],
images=self.image,
return_tensors="pt",
padding=True,
).to(torch_device, torch.bfloat16)
output = model.generate(**inputs, max_new_tokens=20)
EXPECTED_DECODED_TEXT = ['system\nYou are a helpful assistant.\nuser\n\nWhat do you see in this image?\nassistant\nI see two cats lying on a pink blanket. There are also two remote controls on the blanket.'] # fmt: skip
self.assertEqual(
self.processor.batch_decode(output, skip_special_tokens=True),
EXPECTED_DECODED_TEXT,
)
def test_small_model_integration_test_multi_image(self):
# related to (#29835)
model = Ovis2ForConditionalGeneration.from_pretrained(
"thisisiron/Ovis2-2B-hf",
dtype="bfloat16",
device_map=torch_device,
)
url = "http://images.cocodataset.org/val2014/COCO_val2014_000000537955.jpg"
image = Image.open(requests.get(url, stream=True).raw)
prompt = [
{
"role": "user",
"content": [
{"type": "image"},
{"type": "image"},
{"type": "text", "text": "What do you see in these images?"},
],
}
]
text = self.processor.apply_chat_template(prompt, add_generation_prompt=True, tokenize=False)
inputs = self.processor(text=text, images=[self.image, image], return_tensors="pt").to(
torch_device, torch.bfloat16
)
output = model.generate(**inputs, max_new_tokens=40)
EXPECTED_DECODED_TEXT = 'system\nYou are a helpful assistant.\nuser\n\n\nWhat do you see in these images?\nassistant\nIn the first image, I see two cats lying on a pink blanket with remote controls nearby. The second image shows a dog standing on a wooden floor near a kitchen cabinet.' # fmt: skip
self.assertEqual(
self.processor.decode(output[0], skip_special_tokens=True),
EXPECTED_DECODED_TEXT,
)
def test_small_model_integration_test_batch_different_resolutions(self):
model = Ovis2ForConditionalGeneration.from_pretrained(
"thisisiron/Ovis2-2B-hf", dtype="bfloat16", device_map=torch_device
)
lowres_url = "http://images.cocodataset.org/val2014/COCO_val2014_000000537955.jpg"
lowres_img = Image.open(requests.get(lowres_url, stream=True).raw).resize((320, 240))
inputs = self.processor(
text=[self.text, self.text],
images=[lowres_img, self.image],
return_tensors="pt",
padding=True,
).to(torch_device, torch.bfloat16)
output = model.generate(**inputs, max_new_tokens=20)
EXPECTED_DECODED_TEXT = [
'system\nYou are a helpful assistant.\nuser\n\nWhat do you see in this image?\nassistant\nAnswer: I see a brown dog standing on a wooden floor in what appears to be a kitchen.',
'system\nYou are a helpful assistant.\nuser\n\nWhat do you see in this image?\nassistant\nI see two cats lying on a pink blanket. There are also two remote controls on the blanket.'
] # fmt: skip
self.assertEqual(
self.processor.batch_decode(output, skip_special_tokens=True),
EXPECTED_DECODED_TEXT,
)
def test_small_model_integration_test_batch_matches_single(self):
model = Ovis2ForConditionalGeneration.from_pretrained(
"thisisiron/Ovis2-2B-hf",
dtype="bfloat16",
device_map=torch_device,
)
lowres_url = "https://4.img-dpreview.com/files/p/TS560x560~forums/56876524/03975b28741443319e9a94615e35667e"
lowres_img = Image.open(requests.get(lowres_url, stream=True).raw)
inputs_batched = self.processor(
text=[self.text, self.text],
images=[self.image, lowres_img],
return_tensors="pt",
padding=True,
).to(torch_device, torch.bfloat16)
inputs_single = self.processor(text=self.text, images=self.image, return_tensors="pt", padding=True).to(
torch_device, torch.bfloat16
)
output_batched = model.generate(**inputs_batched, max_new_tokens=50)
output_single = model.generate(**inputs_single, max_new_tokens=50)
self.assertEqual(
self.processor.decode(output_batched[0], skip_special_tokens=True),
self.processor.decode(output_single[0], skip_special_tokens=True),
)


@@ -0,0 +1,118 @@
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import shutil
import tempfile
import unittest
from transformers.testing_utils import require_av, require_vision
from transformers.utils import is_vision_available
from ...test_processing_common import ProcessorTesterMixin
if is_vision_available():
from transformers import (
AutoProcessor,
Ovis2ImageProcessor,
Ovis2Processor,
Qwen2TokenizerFast,
)
@require_vision
class Ovis2ProcessorTest(ProcessorTesterMixin, unittest.TestCase):
processor_class = Ovis2Processor
def setUp(self):
self.tmpdirname = tempfile.mkdtemp()
image_processor = Ovis2ImageProcessor()
tokenizer = Qwen2TokenizerFast.from_pretrained("thisisiron/Ovis2-1B-hf")
processor_kwargs = self.prepare_processor_dict()
processor = Ovis2Processor(image_processor=image_processor, tokenizer=tokenizer, **processor_kwargs)
processor.save_pretrained(self.tmpdirname)
def get_tokenizer(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer
def get_image_processor(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor
def prepare_processor_dict(self):
return {
"chat_template": "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n'}}{% if message['content'] is string %}{{ message['content'] }}{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' %}{{ '<image>\n' }}{% elif content['type'] == 'text' %}{{ content['text'] }}{% endif %}{% endfor %}{% endif %}{{'<|im_end|>\n'}}{% endfor %}{% if add_generation_prompt %}{{'<|im_start|>assistant\n' }}{% endif %}",
} # fmt: skip
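    # Illustrative note (not part of the original tests): assuming the `jinja2`
    # package, the template above can be rendered directly to sanity-check its
    # output against apply_chat_template, e.g.
    #   from jinja2 import Template
    #   Template(chat_template).render(messages=messages, add_generation_prompt=True)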
def test_processor_to_json_string(self):
processor = self.get_processor()
obj = json.loads(processor.to_json_string())
for key, value in self.prepare_processor_dict().items():
            # chat_template is tested in a separate test because it is saved in a separate file
if key != "chat_template":
self.assertEqual(obj[key], value)
self.assertEqual(getattr(processor, key, None), value)
def test_chat_template_is_saved(self):
processor_loaded = self.processor_class.from_pretrained(self.tmpdirname)
processor_dict_loaded = json.loads(processor_loaded.to_json_string())
# chat templates aren't serialized to json in processors
self.assertFalse("chat_template" in processor_dict_loaded)
        # they have to be saved as a separate file and loaded back from that file,
        # so we check that the same template is loaded
processor_dict = self.prepare_processor_dict()
self.assertTrue(processor_loaded.chat_template == processor_dict.get("chat_template", None))
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def test_chat_template(self):
processor = AutoProcessor.from_pretrained("thisisiron/Ovis2-1B-hf")
expected_prompt = "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\n<image>\nWhat is shown in this image?<|im_end|>\n<|im_start|>assistant\n"
messages = [
{
"role": "user",
"content": [
{"type": "image"},
{"type": "text", "text": "What is shown in this image?"},
],
},
]
formatted_prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
self.assertEqual(expected_prompt, formatted_prompt)
@require_av
def test_chat_template_dict(self):
processor = AutoProcessor.from_pretrained("thisisiron/Ovis2-1B-hf")
messages = [
{
"role": "user",
"content": [
{"type": "image"},
{"type": "text", "text": "What is shown in this image?"},
],
},
]
formatted_prompt_tokenized = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True)
expected_output = [[151644, 8948, 198, 2610, 525, 264, 10950, 17847, 13, 151645, 198, 151644, 872, 198, 27, 1805, 397, 3838, 374, 6839, 304, 419, 2168, 30, 151645, 198, 151644, 77091, 198]] # fmt: skip
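        # 151644 and 151645 are Qwen2's <|im_start|> and <|im_end|> special-token ids.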
self.assertListEqual(expected_output, formatted_prompt_tokenized)
out_dict = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True, return_dict=True)
self.assertListEqual(list(out_dict.keys()), ["input_ids", "attention_mask"])
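
# Minimal end-to-end usage sketch (not part of the tests above), assuming
# network access to the "thisisiron/Ovis2-1B-hf" checkpoint exercised here.
import requests
from PIL import Image
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("thisisiron/Ovis2-1B-hf")
image = Image.open(
    requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
)
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image"},
            {"type": "text", "text": "Describe the image."},
        ],
    }
]
# apply_chat_template inserts the "<image>" placeholder defined by the template;
# calling the processor with the image then expands it into image tokens.
prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
inputs = processor(images=image, text=prompt, return_tensors="pt")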