init
This commit is contained in:
0	transformers/tests/models/kosmos2_5/__init__.py	Normal file
423	transformers/tests/models/kosmos2_5/test_image_processing_kosmos2_5.py	Normal file
@@ -0,0 +1,423 @@
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import unittest

import numpy as np
import pytest
from packaging import version

from transformers.image_utils import load_image
from transformers.testing_utils import require_torch, require_torch_accelerator, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available

from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
from ...test_processing_common import url_to_local_path


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import Kosmos2_5ImageProcessor

if is_torchvision_available():
    from transformers import Kosmos2_5ImageProcessorFast

class Kosmos2_5ImageProcessingTester:
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = url_to_local_path(
            "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        )
        raw_image = load_image(img_url).convert("RGB")
        return raw_image

    def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        return prepare_image_inputs(
            batch_size=self.batch_size,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            numpify=numpify,
            torchify=torchify,
        )
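

# A minimal usage sketch of the processor under test (for reference only, not
# executed by the tests; `image` stands for any PIL.Image.Image). The output
# shape follows the shape assertions made in the tests below:
#
#     image_processor = Kosmos2_5ImageProcessor()
#     outputs = image_processor(image, return_tensors="pt", max_patches=2048)
#     outputs.flattened_patches.shape  # (1, 2048, 16 * 16 * 3 + 2)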


@require_torch
@require_vision
class Kosmos2_5ImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
    image_processing_class = Kosmos2_5ImageProcessor if is_vision_available() else None
    fast_image_processing_class = Kosmos2_5ImageProcessorFast if is_torchvision_available() else None

    def setUp(self):
        super().setUp()
        self.image_processor_tester = Kosmos2_5ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    # Overwrite from the common test to use `flattened_patches` instead of `pixel_values`.
    # TODO: enhance the common test to avoid overwriting
    @require_vision
    @require_torch
    def test_slow_fast_equivalence(self):
        if not self.test_slow_image_processor or not self.test_fast_image_processor:
            self.skipTest(reason="Skipping slow/fast equivalence test")

        if self.image_processing_class is None or self.fast_image_processing_class is None:
            self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")

        dummy_image = load_image(url_to_local_path("http://images.cocodataset.org/val2017/000000039769.jpg"))
        image_processor_slow = self.image_processing_class(**self.image_processor_dict)
        image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)

        encoding_slow = image_processor_slow(dummy_image, return_tensors="pt")
        encoding_fast = image_processor_fast(dummy_image, return_tensors="pt")
        self.assertTrue(torch.allclose(encoding_slow.flattened_patches, encoding_fast.flattened_patches, atol=1e-1))
        self.assertLessEqual(
            torch.mean(torch.abs(encoding_slow.flattened_patches - encoding_fast.flattened_patches)).item(), 1e-3
        )

    # Overwrite from the common test to use `flattened_patches` instead of `pixel_values`.
    # TODO: enhance the common test to avoid overwriting
    @require_vision
    @require_torch
    def test_slow_fast_equivalence_batched(self):
        if not self.test_slow_image_processor or not self.test_fast_image_processor:
            self.skipTest(reason="Skipping slow/fast equivalence test")

        if self.image_processing_class is None or self.fast_image_processing_class is None:
            self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")

        if hasattr(self.image_processor_tester, "do_center_crop") and self.image_processor_tester.do_center_crop:
            self.skipTest(
                reason="Skipping as do_center_crop is True and center_crop functions are not equivalent for fast and slow processors"
            )

        dummy_images = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
        image_processor_slow = self.image_processing_class(**self.image_processor_dict)
        image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)

        encoding_slow = image_processor_slow(dummy_images, return_tensors="pt")
        encoding_fast = image_processor_fast(dummy_images, return_tensors="pt")

        self.assertTrue(torch.allclose(encoding_slow.flattened_patches, encoding_fast.flattened_patches, atol=1e-1))
        self.assertLessEqual(
            torch.mean(torch.abs(encoding_slow.flattened_patches - encoding_fast.flattened_patches)).item(), 1e-3
        )

    # Overwrite from the common test to use `flattened_patches` instead of `pixel_values`.
    # TODO: enhance the common test to avoid overwriting + fix this compile test.
    @unittest.skip("Failing with `AttributeError: 'StrictLessThan' object has no attribute 'diff'`.")
    @slow
    @require_torch_accelerator
    @require_vision
    @pytest.mark.torch_compile_test
    def test_can_compile_fast_image_processor(self):
        if self.fast_image_processing_class is None:
            self.skipTest("Skipping compilation test as fast image processor is not defined")
        if version.parse(torch.__version__) < version.parse("2.3"):
            self.skipTest(reason="This test requires torch >= 2.3 to run.")

        torch.compiler.reset()
        input_image = torch.randint(0, 255, (3, 224, 224), dtype=torch.uint8)
        image_processor = self.fast_image_processing_class(**self.image_processor_dict)
        output_eager = image_processor(input_image, device=torch_device, return_tensors="pt")

        image_processor = torch.compile(image_processor, mode="reduce-overhead")
        output_compiled = image_processor(input_image, device=torch_device, return_tensors="pt")
        self._assert_slow_fast_tensors_equivalence(
            output_eager.flattened_patches, output_compiled.flattened_patches, atol=1e-4, rtol=1e-4, mean_atol=1e-5
        )

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()

        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048

        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
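        # each flattened patch holds patch_height * patch_width * num_channels pixel
        # values plus 2 leading entries (presumably the patch row/column coordinates)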

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_numpy_4_channels(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        self.image_processor_tester.num_channels = 4
        image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
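        # `input_data_format="channels_last"` is passed explicitly below: with
        # 4-channel arrays the channel dimension cannot be inferred unambiguously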

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, input_data_format="channels_last"
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, input_data_format="channels_last"
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
        self.image_processor_tester.num_channels = 3

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )


@require_torch
@require_vision
class Kosmos2_5ImageProcessingTestFourChannels(ImageProcessingTestMixin, unittest.TestCase):
    image_processing_class = Kosmos2_5ImageProcessor if is_vision_available() else None
    fast_image_processing_class = Kosmos2_5ImageProcessorFast if is_torchvision_available() else None

    def setUp(self):
        super().setUp()
        self.image_processor_tester = Kosmos2_5ImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    # Overwrite from the common test to use `flattened_patches` instead of `pixel_values`.
    # TODO: enhance the common test to avoid overwriting
    @unittest.skip(reason="Kosmos2_5ImageProcessor does not support 4 channels yet")  # FIXME Amy
    @require_vision
    @require_torch
    def test_slow_fast_equivalence(self):
        if not self.test_slow_image_processor or not self.test_fast_image_processor:
            self.skipTest(reason="Skipping slow/fast equivalence test")

        if self.image_processing_class is None or self.fast_image_processing_class is None:
            self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")

        dummy_image = load_image(url_to_local_path("http://images.cocodataset.org/val2017/000000039769.jpg"))
        image_processor_slow = self.image_processing_class(**self.image_processor_dict)
        image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)

        encoding_slow = image_processor_slow(dummy_image, return_tensors="pt")
        encoding_fast = image_processor_fast(dummy_image, return_tensors="pt")
        self.assertTrue(torch.allclose(encoding_slow.flattened_patches, encoding_fast.flattened_patches, atol=1e-1))
        self.assertLessEqual(
            torch.mean(torch.abs(encoding_slow.flattened_patches - encoding_fast.flattened_patches)).item(), 1e-3
        )

    @unittest.skip(reason="Kosmos2_5ImageProcessor does not support 4 channels yet")
    def test_slow_fast_equivalence_batched(self):
        return super().test_slow_fast_equivalence_batched()

    @unittest.skip(reason="Kosmos2_5ImageProcessor does not support 4 channels yet")
    def test_can_compile_fast_image_processor(self):
        return super().test_can_compile_fast_image_processor()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2
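        # `num_channels - 1`: with `do_convert_rgb=True`, the alpha channel of the
        # 4-channel PIL inputs is dropped during the RGB conversion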

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    @unittest.skip(reason="Kosmos2_5ImageProcessor does not support 4 channels yet")  # FIXME Amy
    def test_call_numpy(self):
        return super().test_call_numpy()

    @unittest.skip(reason="Kosmos2_5ImageProcessor does not support 4 channels yet")  # FIXME Amy
    def test_call_pytorch(self):
        return super().test_call_pytorch()

    @unittest.skip(
        reason="Kosmos2_5ImageProcessor does not treat numpy and PIL 4 channel images consistently"
    )  # FIXME Amy
    def test_call_numpy_4_channels(self):
        return super().test_call_numpy_4_channels()
727	transformers/tests/models/kosmos2_5/test_modeling_kosmos2_5.py	Normal file
@@ -0,0 +1,727 @@
# coding=utf-8
# Copyright 2024 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch KOSMOS-2.5 model."""

import copy
import inspect
import tempfile
import unittest

import numpy as np
import pytest
import requests
from parameterized import parameterized

from transformers import AutoProcessor, Kosmos2_5Config
from transformers.models.kosmos2_5.configuration_kosmos2_5 import (
    Kosmos2_5TextConfig,
    Kosmos2_5VisionConfig,
)
from transformers.testing_utils import (
    require_flash_attn,
    require_torch,
    require_torch_gpu,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import is_torch_available, is_vision_available

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
    ModelTesterMixin,
    _config_zero_init,
    floats_tensor,
    ids_tensor,
    random_attention_mask,
)
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import Kosmos2_5ForConditionalGeneration, Kosmos2_5Model


if is_vision_available():
    from PIL import Image

class Kosmos2_5VisionModelTester:
    def __init__(
        self,
        parent,
        batch_size=6,
        image_size=32,
        patch_size=4,
        num_channels=3,
        is_training=True,
        hidden_size=32,
        intermediate_size=64,
        num_hidden_layers=2,
        num_attention_heads=4,
        dropout=0,
        attention_dropout=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_embed_hidden_size = patch_size * patch_size * num_channels
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.scope = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        flattened_patches = floats_tensor([self.batch_size, self.seq_length, self.patch_embed_hidden_size + 2])
        config = self.get_config()

        return config, flattened_patches

    def get_config(self):
        return Kosmos2_5VisionConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            intermediate_size=self.intermediate_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            patch_embed_hidden_size=self.patch_embed_hidden_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, flattened_patches = config_and_inputs
        inputs_dict = {"flattened_patches": flattened_patches}
        return config, inputs_dict


class Kosmos2_5TextModelTester:
    def __init__(
        self,
        parent,
        batch_size=6,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        ffn_dim=64,
        num_hidden_layers=2,
        num_attention_heads=4,
        dropout=0,
        attention_dropout=0,
        max_position_embeddings=512,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.ffn_dim = ffn_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, input_mask

    def get_config(self):
        return Kosmos2_5TextConfig(
            vocab_size=self.vocab_size,
            embed_dim=self.hidden_size,
            ffn_dim=self.ffn_dim,
            layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


class Kosmos2_5ModelTester:
    def __init__(
        self,
        parent,
        text_kwargs=None,
        vision_kwargs=None,
        latent_query_num=3,
        is_training=True,
    ):
        if text_kwargs is None:
            text_kwargs = {}
        if vision_kwargs is None:
            vision_kwargs = {}

        self.parent = parent
        self.text_model_tester = Kosmos2_5TextModelTester(parent, **text_kwargs)
        self.vision_model_tester = Kosmos2_5VisionModelTester(parent, **vision_kwargs)
        self.batch_size = self.text_model_tester.batch_size  # need bs for batching_equivalence test
        self.seq_length = self.text_model_tester.seq_length
        self.latent_query_num = latent_query_num
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs()
        vision_config, flattened_patches = self.vision_model_tester.prepare_config_and_inputs()

        # build `image_embeds_position_mask`
        image_embeds_position_mask = torch.zeros_like(input_ids)
        image_embeds_position_mask[:, 1 : 1 + self.latent_query_num] = 1
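        # positions 1 .. latent_query_num are the slots where the image embeddings are
        # inserted into the text sequence (position 0 is assumed to hold the BOS token)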

        config = self.get_config()

        return (
            config,
            input_ids,
            attention_mask,
            image_embeds_position_mask,
            flattened_patches,
        )

    def get_config(self):
        return Kosmos2_5Config(
            self.text_model_tester.get_config().to_dict(),
            self.vision_model_tester.get_config().to_dict(),
            latent_query_num=self.latent_query_num,
        )
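
    # Note: in `get_config` above, the two positional arguments are the text and
    # vision sub-config dicts consumed by `Kosmos2_5Config`.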

    def create_and_check_model(
        self,
        config,
        input_ids,
        attention_mask,
        image_embeds_position_mask,
        flattened_patches,
    ):
        model = Kosmos2_5Model(config).to(torch_device).eval()
        with torch.no_grad():
            result = model(input_ids, flattened_patches, image_embeds_position_mask, attention_mask)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.text_model_tester.batch_size,
                self.text_model_tester.seq_length,
                self.text_model_tester.hidden_size,
            ),
        )
        self.parent.assertEqual(
            result.image_embeds.shape,
            (
                self.text_model_tester.batch_size,
                self.latent_query_num,
                self.text_model_tester.hidden_size,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            attention_mask,
            image_embeds_position_mask,
            flattened_patches,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "image_embeds_position_mask": image_embeds_position_mask,
            "flattened_patches": flattened_patches,
        }
        return config, inputs_dict


@require_torch
class Kosmos2_5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (Kosmos2_5Model, Kosmos2_5ForConditionalGeneration) if is_torch_available() else ()
    all_generative_model_classes = (Kosmos2_5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": Kosmos2_5Model,
            "image-to-text": Kosmos2_5ForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    test_attention_outputs = False
    _is_composite = True

    # TODO: `image-to-text` pipeline for this model needs Processor.
    def is_pipeline_test_to_skip(
        self,
        pipeline_test_casse_name,
        config_class,
        model_architecture,
        tokenizer_name,
        processor_name,
    ):
        return pipeline_test_casse_name == "ImageToTextPipelineTests"

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if return_labels:
            if model_class.__name__ == "Kosmos2_5ForConditionalGeneration":
                inputs_dict["labels"] = torch.zeros(
                    (
                        self.model_tester.text_model_tester.batch_size,
                        self.model_tester.text_model_tester.seq_length,
                    ),
                    dtype=torch.long,
                    device=torch_device,
                )

        if model_class.__name__ in [
            "Kosmos2_5Model",
            "Kosmos2_5ForConditionalGeneration",
        ]:
            bs, _ = inputs_dict["input_ids"].shape
            seqlen = self.model_tester.text_model_tester.seq_length
            inputs_dict["input_ids"] = torch.arange(seqlen, device=torch_device).unsqueeze(0).expand(bs, seqlen)
            inputs_dict["input_ids"] = inputs_dict["input_ids"] % self.model_tester.text_model_tester.vocab_size
            inputs_dict["attention_mask"] = torch.ones((bs, seqlen), device=torch_device)
            inputs_dict["image_embeds_position_mask"] = torch.zeros((bs, seqlen), device=torch_device)
            inputs_dict["image_embeds_position_mask"][:, : self.model_tester.latent_query_num] = 1

        return inputs_dict

    def setUp(self):
        self.model_tester = Kosmos2_5ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Kosmos2_5Config, hidden_size=37)

    @unittest.skip("KOSMOS-2.5 doesn't support padding")
    def test_eager_padding_matches_padding_free_with_position_ids(self):
        pass

    @unittest.skip("KOSMOS-2.5 doesn't support padding")
    def test_sdpa_padding_matches_padding_free_with_position_ids(self):
        pass

    @parameterized.expand([("random",), ("same",)])
    @pytest.mark.generate
    @unittest.skip(
        "Kosmos-2.5 doesn't support assisted generation due to the need to extend `image_embeds_position_mask` length."
    )
    def test_assisted_decoding_matches_greedy_search(self):
        pass

    @pytest.mark.generate
    @unittest.skip(
        "Kosmos-2.5 doesn't support assisted generation due to the need to extend `image_embeds_position_mask` length."
    )
    def test_assisted_decoding_sample(self):
        pass

    @unittest.skip(
        "Kosmos-2.5 doesn't support assisted generation due to the need to extend `image_embeds_position_mask` length."
    )
    def test_prompt_lookup_decoding_matches_greedy_search(self):
        pass

    # overwrite from common to skip `image_to_text_projection.latent_query`
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name == "image_to_text_projection.latent_query":
                        # The original code uses `nn.Parameter(torch.randn(...))`, for which this test won't pass.
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["input_ids"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_load_save_without_tied_weights(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.text_config.tie_word_embeddings = False
        for model_class in self.all_model_classes:
            model = model_class(config)
            with tempfile.TemporaryDirectory() as d:
                model.save_pretrained(d)

                model_reloaded, infos = model_class.from_pretrained(d, output_loading_info=True)
                # Checking the state dicts are correct
                reloaded_state = model_reloaded.state_dict()
                for k, v in model.state_dict().items():
                    self.assertIn(k, reloaded_state, f"Key {k} is missing from reloaded")
                    torch.testing.assert_close(
                        v,
                        reloaded_state[k],
                        msg=lambda x: f"{model_class.__name__}: Tensor {k}: {x}",
                    )
                # Checking there was no complaint about missing weights
                self.assertEqual(infos["missing_keys"], [])

    # overwrite from common in order to use `self.model_tester.text_model_tester.num_hidden_layers`
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester,
                "expected_num_hidden_layers",
                self.model_tester.text_model_tester.num_hidden_layers + 1,
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.text_model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.text_model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    # overwrite from common in order to use `config.text_config.vocab_size` instead of `config.vocab_size`
    def test_tie_model_weights(self):
        if not self.test_torchscript:
            self.skipTest(reason="test_torchscript is set to False")

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_same_values(layer_1, layer_2):
            equal = True
            for p1, p2 in zip(layer_1.weight, layer_2.weight):
                if p1.data.ne(p2.data).sum() > 0:
                    equal = False
            return equal

        for model_class in self.all_model_classes:
            config.torchscript = True
            model_not_tied = model_class(config)
            if model_not_tied.get_output_embeddings() is None:
                continue

            config_tied = copy.deepcopy(config)
            config_tied.torchscript = False
            model_tied = model_class(config_tied)
            params_tied = list(model_tied.parameters())
            # Check that the embedding layer and decoding layer are the same in size and in value
            # self.assertTrue(check_same_values(embeddings, decoding))

            # # Check that after modification, they remain the same.
            # embeddings.weight.data.div_(2)
            # # Check that the embedding layer and decoding layer are the same in size and in value
            # self.assertTrue(embeddings.weight.shape, decoding.weight.shape)
            # self.assertTrue(check_same_values(embeddings, decoding))

            # # Check that after modification, they remain the same.
            # decoding.weight.data.div_(4)
            # # Check that the embedding layer and decoding layer are the same in size and in value
            # self.assertTrue(embeddings.weight.shape, decoding.weight.shape)
            # self.assertTrue(check_same_values(embeddings, decoding))

            # Check that after resize they remain tied.
            model_tied.resize_token_embeddings(config.text_config.vocab_size + 10)
            params_tied_2 = list(model_tied.parameters())
            self.assertEqual(len(params_tied_2), len(params_tied))

            # decoding.weight.data.mul_(20)
            # # Check that the embedding layer and decoding layer are the same in size and in value
            # self.assertTrue(model.transformer.wte.weight.shape, model.lm_head.weight.shape)
            # self.assertTrue(check_same_values(model.transformer.wte, model.lm_head))

    @slow
    def test_model_from_pretrained(self):
        model_name = "microsoft/kosmos-2.5"
        model = Kosmos2_5Model.from_pretrained(model_name)
        self.assertIsNotNone(model)

    @unittest.skip(reason="Does not work on the tiny model as we keep hitting edge cases.")
    def test_model_parallelism(self):
        pass

    # TODO: ydshieh
    @require_torch_gpu
    @slow
    @unittest.skip(reason="_update_causal_mask is not implemented yet, which fails this test")
    def test_sdpa_can_dispatch_on_flash(self):
        pass

    # TODO: ydshieh
    @unittest.skip(reason="doesn't support padding yet")
    def test_eager_matches_sdpa_inference_1_bfloat16(self):
        pass

    # TODO: ydshieh
    @unittest.skip(reason="the model hasn't been added to the auto class")
    def test_flash_attn_2_from_config(self):
        pass

    @unittest.skip("This test is currently not well designed for multimodal models (float type as an input).")
    def test_flash_attn_2_fp32_ln(self):
        pass

    @unittest.skip("This test is currently not well designed for multimodal models (float type as an input).")
    def test_flash_attention_2_padding_matches_padding_free_with_position_ids(self):
        pass

    @unittest.skip("KOSMOS-2.5 is multimodal and has specific input shapes.")
    def test_flash_attn_2_generate_reuse_cache(self):
        pass

    @pytest.mark.generate
    @parameterized.expand([("greedy", 1), ("beam search", 2)])
    @unittest.skip(
        "KOSMOS-2.5 doesn't support inputs embeds. The test isn't skipped by checking input args because KOSMOS-2.5 has `generate()` overwritten",
    )
    def test_generate_from_inputs_embeds(self):
        pass

    @pytest.mark.generate
    def test_left_padding_compatibility(self):
        # Overwrite -- Kosmos-2.5 needs to prepare `image_embeds_position_mask`, and it must be padded accordingly
        _, inputs_dict = self.prepare_config_and_inputs_for_generate()
        input_ids = inputs_dict["input_ids"]

        def _prepare_image_embeds_position_mask(input_ids, pad_size):
            image_embeds_position_mask = torch.zeros(
                input_ids.shape[0], input_ids.shape[1] + pad_size, device=torch_device, dtype=input_ids.dtype
            )
            image_embeds_position_mask[:, (pad_size + 1) : pad_size + 1 + self.model_tester.latent_query_num] = 1
            return image_embeds_position_mask

        # `image_embeds_position_mask` is randomly generated in `prepare_config_and_inputs_for_generate`, and it must
        # match its padded version for the test to be valid -- we need to pass both
        unpadded_custom_inputs = {"image_embeds_position_mask": _prepare_image_embeds_position_mask(input_ids, 0)}
        padded_custom_inputs = {"image_embeds_position_mask": _prepare_image_embeds_position_mask(input_ids, 32)}
        super().test_left_padding_compatibility(
            unpadded_custom_inputs=unpadded_custom_inputs, padded_custom_inputs=padded_custom_inputs
        )


@require_vision
@require_torch
@slow
class Kosmos2_5ModelIntegrationTest(unittest.TestCase):
    # This variable is used to determine which CUDA device we are using for our runners (A10 or T4).
    # Depending on the hardware we get different logits / generations.
    cuda_compute_capability_major_version = None

    @classmethod
    def setUpClass(cls):
        if is_torch_available() and torch.cuda.is_available():
            # 8 is for A100 / A10 and 7 for T4
            cls.cuda_compute_capability_major_version = torch.cuda.get_device_capability()[0]

    def run_example(self, prompt, image, model, processor):
        inputs = processor(text=prompt, images=image, return_tensors="pt")
        inputs = {k: v.to(torch_device) if v is not None else None for k, v in inputs.items()}
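        # the processor output is float32 by default; align the patches with the
        # model dtype (bfloat16 in these tests) before generating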
        inputs["flattened_patches"] = inputs["flattened_patches"].to(model.dtype)

        generation_outputs = model.generate(
            **inputs,
            max_new_tokens=1024,
        )
        generated_ids = generation_outputs
        generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)

        return generated_ids, generated_text

    def test_eager(self):
        url = "https://huggingface.co/microsoft/kosmos-2.5/resolve/main/receipt_00008.png"
        image = Image.open(requests.get(url, stream=True).raw)

        dtype = torch.bfloat16
        repo = "microsoft/kosmos-2.5"
        model = Kosmos2_5ForConditionalGeneration.from_pretrained(
            repo, device_map=torch_device, dtype=dtype, attn_implementation="eager"
        )
        processor = AutoProcessor.from_pretrained(repo)
        prompt = "<ocr>"
        generated_ids, generated_text = self.run_example(prompt, image, model, processor)
        EXPECTED_TEXT = {
            7: [
                "<bbox><x_53><y_573><x_69><y_606></bbox>1\n<bbox><x_79><y_573><x_464><y_611></bbox>[REG] BLACK SAKURA\n<bbox><x_690><y_569><x_810><y_606></bbox>45,455\n<bbox><x_53><y_614><x_69><y_648></bbox>1\n<bbox><x_79><y_614><x_468><y_651></bbox>COOKIE DOH SAUCES\n<bbox><x_788><y_609><x_812><y_642></bbox>0\n<bbox><x_50><y_658><x_69><y_693></bbox>1\n<bbox><x_79><y_658><x_358><y_693></bbox>NATA DE COCO\n<bbox><x_790><y_652><x_814><y_683></bbox>0\n<bbox><x_31><y_742><x_820><y_781></bbox>Sub Total 45,455\n<bbox><x_27><y_781><x_822><y_827></bbox>PB1 (10%) 4,545\n<bbox><x_27><y_826><x_824><y_872></bbox>Rounding 0\n<bbox><x_24><y_872><x_827><y_921></bbox>Total 50,000\n<bbox><x_17><y_1056><x_836><y_1108></bbox>Card Payment 50,000\n"
            ],
            8: [
                "<bbox><x_53><y_573><x_69><y_606></bbox>1\n<bbox><x_79><y_573><x_464><y_611></bbox>[REG] BLACK SAKURA\n<bbox><x_690><y_569><x_810><y_606></bbox>45,455\n<bbox><x_53><y_614><x_69><y_648></bbox>1\n<bbox><x_79><y_614><x_468><y_650></bbox>COOKIE DOH SAUCES\n<bbox><x_788><y_609><x_812><y_644></bbox>0\n<bbox><x_50><y_658><x_69><y_693></bbox>1\n<bbox><x_79><y_658><x_358><y_693></bbox>NATA DE COCO\n<bbox><x_790><y_652><x_814><y_687></bbox>0\n<bbox><x_31><y_742><x_820><y_781></bbox>Sub Total 45,455\n<bbox><x_27><y_781><x_822><y_827></bbox>PB1 (10%) 4,545\n<bbox><x_27><y_826><x_824><y_872></bbox>Rounding 0\n<bbox><x_24><y_872><x_827><y_921></bbox>Total 50,000\n<bbox><x_17><y_1056><x_836><y_1108></bbox>Card Payment 50,000\n"
            ],
        }

        self.assertListEqual(generated_text, EXPECTED_TEXT[self.cuda_compute_capability_major_version])

        prompt = "<md>"
        generated_ids, generated_text = self.run_example(prompt, image, model, processor)

        EXPECTED_TEXT = {
            7: [
                "- **1 \\[REG\\] BLACK SAKURA** 45,455\n- **1 COOKIE DOH SAUCES** 0\n- **1 NATA DE COCO** 0\n- **Sub Total** 45,455\n- **PB1 (10%)** 4,545\n- **Rounding** 0\n- **Total** **50,000**\n\nCard Payment 50,000"
            ],
            8: [
                "- **1 \\[REG\\] BLACK SAKURA** 45,455\n- **1 COOKIE DOH SAUCES** 0\n- **1 NATA DE COCO** 0\n- **Sub Total** 45,455\n- **PB1 (10%)** 4,545\n- **Rounding** 0\n- **Total** **50,000**\n\nCard Payment 50,000"
            ],
        }

        self.assertListEqual(generated_text, EXPECTED_TEXT[self.cuda_compute_capability_major_version])

    def test_sdpa(self):
        url = "https://huggingface.co/microsoft/kosmos-2.5/resolve/main/receipt_00008.png"
        image = Image.open(requests.get(url, stream=True).raw)

        dtype = torch.bfloat16
        repo = "microsoft/kosmos-2.5"
        model = Kosmos2_5ForConditionalGeneration.from_pretrained(
            repo, device_map=torch_device, dtype=dtype, attn_implementation="sdpa"
        )
        processor = AutoProcessor.from_pretrained(repo)
        prompt = "<ocr>"
        generated_ids, generated_text = self.run_example(prompt, image, model, processor)
        EXPECTED_TEXT = {
            7: [
                "<bbox><x_53><y_573><x_69><y_606></bbox>1\n<bbox><x_79><y_573><x_464><y_611></bbox>[REG] BLACK SAKURA\n<bbox><x_690><y_569><x_810><y_606></bbox>45,455\n<bbox><x_53><y_614><x_69><y_648></bbox>1\n<bbox><x_79><y_614><x_468><y_651></bbox>COOKIE DOH SAUCES\n<bbox><x_788><y_609><x_812><y_642></bbox>0\n<bbox><x_50><y_658><x_69><y_693></bbox>1\n<bbox><x_79><y_658><x_358><y_693></bbox>NATA DE COCO\n<bbox><x_790><y_652><x_814><y_683></bbox>0\n<bbox><x_31><y_742><x_820><y_781></bbox>Sub Total 45,455\n<bbox><x_27><y_781><x_822><y_827></bbox>PB1 (10%) 4,545\n<bbox><x_27><y_826><x_824><y_872></bbox>Rounding 0\n<bbox><x_24><y_872><x_827><y_921></bbox>Total 50,000\n<bbox><x_17><y_1056><x_836><y_1108></bbox>Card Payment 50,000\n",
            ],
            8: [
                "<bbox><x_53><y_573><x_69><y_606></bbox>1\n<bbox><x_79><y_573><x_464><y_611></bbox>[REG] BLACK SAKURA\n<bbox><x_690><y_569><x_810><y_606></bbox>45,455\n<bbox><x_53><y_614><x_69><y_648></bbox>1\n<bbox><x_79><y_614><x_468><y_651></bbox>COOKIE DOH SAUCES\n<bbox><x_788><y_609><x_812><y_642></bbox>0\n<bbox><x_50><y_658><x_69><y_693></bbox>1\n<bbox><x_79><y_658><x_358><y_693></bbox>NATA DE COCO\n<bbox><x_790><y_652><x_814><y_683></bbox>0\n<bbox><x_31><y_742><x_820><y_781></bbox>Sub Total 45,455\n<bbox><x_27><y_781><x_822><y_827></bbox>PB1 (10%) 4,545\n<bbox><x_27><y_826><x_824><y_872></bbox>Rounding 0\n<bbox><x_24><y_872><x_827><y_921></bbox>Total 50,000\n<bbox><x_17><y_1056><x_836><y_1108></bbox>Card Payment 50,000\n"
            ],
        }

        self.assertListEqual(generated_text, EXPECTED_TEXT[self.cuda_compute_capability_major_version])

        prompt = "<md>"
        generated_ids, generated_text = self.run_example(prompt, image, model, processor)

        EXPECTED_TEXT = {
            7: [
                "- **1 \\[REG\\] BLACK SAKURA** 45,455\n- **1 COOKIE DOH SAUCES** 0\n- **1 NATA DE COCO** 0\n- **Sub Total** 45,455\n- **PB1 (10%)** 4,545\n- **Rounding** 0\n- **Total** **50,000**\n\nCard Payment 50,000"
            ],
            8: [
                "- **1 \\[REG\\] BLACK SAKURA** 45,455\n- **1 COOKIE DOH SAUCES** 0\n- **1 NATA DE COCO** 0\n- **Sub Total** 45,455\n- **PB1 (10%)** 4,545\n- **Rounding** 0\n- **Total** **50,000**\n\nCard Payment 50,000"
            ],
        }

        self.assertListEqual(generated_text, EXPECTED_TEXT[self.cuda_compute_capability_major_version])

    @require_flash_attn
    @require_torch_gpu
    @pytest.mark.flash_attn_test
    @slow
    def test_FA2(self):
        url = "https://huggingface.co/microsoft/kosmos-2.5/resolve/main/receipt_00008.png"
        image = Image.open(requests.get(url, stream=True).raw)

        dtype = torch.bfloat16
        repo = "microsoft/kosmos-2.5"
        model = Kosmos2_5ForConditionalGeneration.from_pretrained(
            repo,
            device_map=torch_device,
            dtype=dtype,
            attn_implementation="flash_attention_2",
        )
        processor = AutoProcessor.from_pretrained(repo)
        prompt = "<ocr>"
        generated_ids, generated_text = self.run_example(prompt, image, model, processor)
        EXPECTED_TEXT = [
            "<bbox><x_53><y_573><x_69><y_606></bbox>1\n<bbox><x_79><y_573><x_464><y_612></bbox>[REG] BLACK SAKURA\n<bbox><x_690><y_569><x_812><y_606></bbox>45,455\n<bbox><x_53><y_614><x_69><y_650></bbox>1\n<bbox><x_79><y_614><x_468><y_650></bbox>COOKIE DOH SAUCES\n<bbox><x_788><y_610><x_813><y_644></bbox>0\n<bbox><x_50><y_658><x_65><y_693></bbox>1\n<bbox><x_76><y_658><x_358><y_693></bbox>NATA DE COCO\n<bbox><x_790><y_652><x_815><y_687></bbox>0\n<bbox><x_31><y_742><x_822><y_781></bbox>Sub Total 45,455\n<bbox><x_27><y_780><x_822><y_827></bbox>PB1 (10%) 4,545\n<bbox><x_27><y_826><x_824><y_874></bbox>Rounding 0\n<bbox><x_24><y_872><x_827><y_921></bbox>Total 50,000\n<bbox><x_17><y_1056><x_835><y_1108></bbox>Card Payment 50,000\n"
        ]

        self.assertListEqual(generated_text, EXPECTED_TEXT)

        prompt = "<md>"
        generated_ids, generated_text = self.run_example(prompt, image, model, processor)
        # A10 gives the 1st one, but A100 gives the 2nd one
        EXPECTED_TEXT = [
            "- **1 \\[REG\\] BLACK SAKURA** 45,455\n- **1 COOKIE DOH SAUCES** 0\n- **1 NATA DE COCO** 0\n\n<table>\n<thead>\n<tr>\n<th>\nSub Total\n</th>\n<th>\n45,455\n</th>\n</tr>\n</thead>\n<tbody>\n<tr>\n<td>\nPB1 (10%)\n</td>\n<td>\n4,545\n</td>\n</tr>\n<tr>\n<td>\nRounding\n</td>\n<td>\n0\n</td>\n</tr>\n<tr>\n<td>\n<strong>\nTotal\n</strong>\n</td>\n<td>\n<strong>\n50,000\n</strong>\n</td>\n</tr>\n</tbody>\n</table>\n\nCard Payment 50,000",
            "- **1 \\[REG\\] BLACK SAKURA** 45,455\n- **1 COOKIE DOH SAUCES** 0\n- **1 NATA DE COCO** 0\n- **Sub Total** 45,455\n- **PB1 (10%)** 4,545\n- **Rounding** 0\n- **Total** **50,000**\n",
        ]
        self.assertIn(generated_text[0], EXPECTED_TEXT)
391	transformers/tests/models/kosmos2_5/test_processor_kosmos2_5.py	Normal file
@@ -0,0 +1,391 @@
# coding=utf-8
# Copyright 2024 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import shutil
import tempfile
import unittest
from tempfile import TemporaryDirectory

import numpy as np
import pytest

from transformers.image_utils import load_image
from transformers.testing_utils import (
    require_torch,
    require_vision,
)
from transformers.utils import is_vision_available

from ...test_processing_common import ProcessorTesterMixin, url_to_local_path


if is_vision_available():
    from PIL import Image

    from transformers import (
        AutoProcessor,
        AutoTokenizer,
        Kosmos2_5ImageProcessor,
        Kosmos2_5Processor,
        PreTrainedTokenizerFast,
    )


@require_vision
class Kosmos2_5ProcessorTest(ProcessorTesterMixin, unittest.TestCase):
    processor_class = Kosmos2_5Processor
    images_input_name = "flattened_patches"

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = Kosmos2_5ImageProcessor()
        tokenizer = AutoTokenizer.from_pretrained("microsoft/kosmos-2.5")
        processor = Kosmos2_5Processor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_image_processor_load_save_reload(self):
        # make sure load from Hub repo -> save -> reload locally works
        image_processor = Kosmos2_5ImageProcessor.from_pretrained("microsoft/kosmos-2.5")
        with TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = Kosmos2_5ImageProcessor.from_pretrained(tmp_dir)
            assert image_processor.to_dict() == reloaded_image_processor.to_dict()
            assert image_processor.to_json_string() == reloaded_image_processor.to_json_string()

    def test_save_load_pretrained_additional_features(self):
        processor = Kosmos2_5Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = Kosmos2_5Processor.from_pretrained(
            self.tmpdirname,
            bos_token="(BOS)",
            eos_token="(EOS)",
            do_normalize=False,
            padding_value=1.0,
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(
            processor.image_processor.to_json_string(),
            image_processor_add_kwargs.to_json_string(),
        )
        self.assertIsInstance(processor.image_processor, Kosmos2_5ImageProcessor)

    @unittest.skip(reason="kosmos-2.5 must have both image and text")
    def test_image_processor(self):
        pass

    @unittest.skip(reason="kosmos-2.5 must have both image and text")
    def test_tokenizer(self):
        pass

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Kosmos2_5Processor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_can_load_various_tokenizers(self):
        for checkpoint in ["microsoft/kosmos-2.5"]:
            processor = AutoProcessor.from_pretrained(checkpoint)
            tokenizer = AutoTokenizer.from_pretrained(checkpoint)
            self.assertEqual(processor.tokenizer.__class__, tokenizer.__class__)
|
||||
|
||||
    @require_torch
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Kosmos2_5Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "This is a test"
        image_input = self.prepare_image_inputs()

        # both image and text
        inputs = processor(text=input_str, images=image_input)
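        # Expected keys cover both modalities: patch tensors plus their mask and grid
        # size for the image, token ids and the image-embedding position mask for the text.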
        self.assertListEqual(
            list(inputs.keys()),
            [
                "flattened_patches",
                "attention_mask",
                "width",
                "height",
                "input_ids",
                "image_embeds_position_mask",
            ],
        )
        # test that it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    @require_torch
    @require_vision
    def test_image_processor_defaults_preserved_by_image_kwargs(self):
        # Rewritten because the KOSMOS-2.5 processor returns "flattened_patches" rather than "pixel_values"
        if "image_processor" not in self.processor_class.attributes:
            self.skipTest(f"image_processor attribute not present in {self.processor_class}")
        image_processor = self.get_component("image_processor", max_patches=1024, patch_size={"height": 8, "width": 8})
        tokenizer = self.get_component("tokenizer", max_length=117, padding="max_length")

        processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor)
        self.skip_processor_without_typed_kwargs(processor)

        input_str = self.prepare_text_inputs()
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
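        # Each flattened patch carries 2 leading (row, col) coordinates plus
        # 8 * 8 * 3 = 192 pixel values for the 8x8 patch size set above: 2 + 192 = 194.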
        self.assertEqual(len(inputs["flattened_patches"][0][0]), 194)
    @require_torch
    @require_vision
    def test_kwargs_overrides_default_image_processor_kwargs(self):
        # Rewritten because the KOSMOS-2.5 processor returns "flattened_patches" rather than "pixel_values"
        if "image_processor" not in self.processor_class.attributes:
            self.skipTest(f"image_processor attribute not present in {self.processor_class}")
        image_processor = self.get_component("image_processor", max_patches=4096)
        tokenizer = self.get_component("tokenizer", max_length=117, padding="max_length")

        processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor)
        self.skip_processor_without_typed_kwargs(processor)

        input_str = self.prepare_text_inputs()
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input, max_patches=1024)
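        # The call-time max_patches=1024 should win over the component default of 4096.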
        self.assertEqual(len(inputs["flattened_patches"][0]), 1024)
    @require_torch
    @require_vision
    def test_unstructured_kwargs(self):
        # Rewritten because the KOSMOS-2.5 processor doesn't use `rescale_factor`
        if "image_processor" not in self.processor_class.attributes:
            self.skipTest(f"image_processor attribute not present in {self.processor_class}")
        image_processor = self.get_component("image_processor")
        tokenizer = self.get_component("tokenizer")

        processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor)
        self.skip_processor_without_typed_kwargs(processor)

        input_str = self.prepare_text_inputs()
        image_input = self.prepare_image_inputs()
        inputs = processor(
            text=input_str,
            images=image_input,
            return_tensors="pt",
            max_patches=1024,
            padding="max_length",
            max_length=76,
        )
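        # Flat kwargs are dispatched per modality: max_patches goes to the image
        # processor, padding/max_length to the tokenizer.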
        self.assertEqual(inputs["flattened_patches"].shape[1], 1024)
        self.assertEqual(len(inputs["input_ids"][0]), 76)
    @require_torch
    @require_vision
    def test_unstructured_kwargs_batched(self):
        # Rewritten because the KOSMOS-2.5 processor doesn't use `rescale_factor`
        if "image_processor" not in self.processor_class.attributes:
            self.skipTest(f"image_processor attribute not present in {self.processor_class}")
        image_processor = self.get_component("image_processor")
        tokenizer = self.get_component("tokenizer")

        processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor)
        self.skip_processor_without_typed_kwargs(processor)

        input_str = self.prepare_text_inputs(batch_size=2)
        image_input = self.prepare_image_inputs(batch_size=2)
        inputs = processor(
            text=input_str,
            images=image_input,
            return_tensors="pt",
            max_patches=1024,
            padding="longest",
            max_length=76,
        )

        self.assertEqual(inputs["flattened_patches"].shape[1], 1024)
        self.assertEqual(len(inputs["input_ids"][0]), 76)
    @require_torch
    @require_vision
    def test_structured_kwargs_nested(self):
        # Rewritten because the KOSMOS-2.5 processor doesn't use `rescale_factor`
        if "image_processor" not in self.processor_class.attributes:
            self.skipTest(f"image_processor attribute not present in {self.processor_class}")
        image_processor = self.get_component("image_processor")
        tokenizer = self.get_component("tokenizer")

        processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor)
        self.skip_processor_without_typed_kwargs(processor)

        input_str = self.prepare_text_inputs()
        image_input = self.prepare_image_inputs()

        # Define the kwargs for each modality
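        # images_kwargs are routed to the image processor, text_kwargs to the
        # tokenizer, and common_kwargs to both.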
        all_kwargs = {
            "common_kwargs": {"return_tensors": "pt"},
            "images_kwargs": {"max_patches": 1024},
            "text_kwargs": {"padding": "max_length", "max_length": 76},
        }

        inputs = processor(text=input_str, images=image_input, **all_kwargs)
        self.skip_processor_without_typed_kwargs(processor)

        self.assertEqual(inputs["flattened_patches"].shape[1], 1024)
        self.assertEqual(len(inputs["input_ids"][0]), 76)
    @require_torch
    @require_vision
    def test_structured_kwargs_nested_from_dict(self):
        # Rewritten because the KOSMOS-2.5 processor doesn't use `rescale_factor`
        if "image_processor" not in self.processor_class.attributes:
            self.skipTest(f"image_processor attribute not present in {self.processor_class}")

        image_processor = self.get_component("image_processor")
        tokenizer = self.get_component("tokenizer")

        processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor)
        self.skip_processor_without_typed_kwargs(processor)
        input_str = self.prepare_text_inputs()
        image_input = self.prepare_image_inputs()

        # Define the kwargs for each modality
        all_kwargs = {
            "common_kwargs": {"return_tensors": "pt"},
            "images_kwargs": {"max_patches": 1024},
            "text_kwargs": {"padding": "max_length", "max_length": 76},
        }

        inputs = processor(text=input_str, images=image_input, **all_kwargs)
        self.assertEqual(inputs["flattened_patches"].shape[1], 1024)
        self.assertEqual(len(inputs["input_ids"][0]), 76)
    @require_torch
    def test_full_processor(self):
        url = url_to_local_path("https://huggingface.co/kirp/kosmos2_5/resolve/main/receipt_00008.png")
        processor = AutoProcessor.from_pretrained("microsoft/kosmos-2.5")
        texts = ["<md>", "<ocr>"]
        expected_input_ids = [
            [100288],
            [100282],
        ]
        expected_attention_mask = [[1], [1]]

        image = load_image(url)
        # Save and reload via PNG to match the official (microsoft) demo from which the expected values were grabbed
        image_path = os.path.join(self.tmpdirname, "image.png")
        image.save(image_path)
        image = Image.open(image_path)

        # test single image
        outputs = processor(images=image, text=texts[0])
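        # Sequence layout (readable off the assertions below): a leading id, the
        # image-start token (100283), 2048 placeholder positions reserved for the
        # image embeddings, the image-end token (100284), then the task prompt ids.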
        self.assertListEqual(
            outputs.input_ids[0].numpy().tolist(),
            [0, 100283] + [0] * 2048 + [100284] + expected_input_ids[0],
        )
        self.assertListEqual(
            outputs.image_embeds_position_mask[0].numpy().tolist(),
            [0, -1] + [1] * 2048 + [-1] + [0] * len(expected_input_ids[0]),
        )
        self.assertListEqual(
            outputs.attention_mask[0].numpy().tolist(),
            [1, 1] + [1] * 2048 + [1] + expected_attention_mask[0],
        )
        EXPECTED_FP_1 = [
            1.0,
            2.0,
            -2.9527735710144043,
            -2.672085762023926,
            -2.9933173656463623,
            -2.905944585800171,
            -2.5891761779785156,
            -2.8751866817474365,
            -2.962153434753418,
            -2.588062047958374,
        ]
        EXPECTED_FP_200 = [
            4.0,
            45.0,
            1.5713728666305542,
            1.584628939628601,
            1.3589054346084595,
            1.6515952348709106,
            1.7014952898025513,
            1.3731343746185303,
            1.6010395288467407,
            1.6607422828674316,
        ]
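        # 770 features per patch = 2 leading (row, col) coordinates (the 1.0, 2.0 /
        # 4.0, 45.0 above) + 16 * 16 * 3 = 768 flattened pixel values.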
        self.assertTupleEqual(outputs.flattened_patches.shape, (1, 4096, 770))
        np.testing.assert_allclose(
            outputs.flattened_patches[0][1][:10].numpy().tolist(),
            EXPECTED_FP_1,
            atol=1e-9,
        )
        np.testing.assert_allclose(
            outputs.flattened_patches[0][200][:10].numpy().tolist(),
            EXPECTED_FP_200,
            atol=1e-9,
        )
        # test a batch of images and texts, right padding
        outputs = processor(images=[image, image], text=texts)
        self.assertListEqual(
            outputs.input_ids[1].numpy().tolist(),
            [0, 100283] + [0] * 2048 + [100284] + expected_input_ids[1],
        )
        self.assertListEqual(
            outputs.image_embeds_position_mask[1].numpy().tolist(),
            [0, -1] + [1] * 2048 + [-1] + [0] * len(expected_input_ids[1]),
        )
        self.assertListEqual(
            outputs.attention_mask[1].numpy().tolist(),
            [1, 1] + [1] * 2048 + [1] + expected_attention_mask[1],
        )
        self.assertTupleEqual(outputs.flattened_patches.shape, (2, 4096, 770))
        np.testing.assert_allclose(
            outputs.flattened_patches[1][1][:10].numpy().tolist(),
            EXPECTED_FP_1,
            atol=1e-9,
        )
        np.testing.assert_allclose(
            outputs.flattened_patches[1][200][:10].numpy().tolist(),
            EXPECTED_FP_200,
            atol=1e-9,
        )