init
0  transformers/tests/models/got_ocr2/__init__.py  Normal file
202  transformers/tests/models/got_ocr2/test_image_processing_got_ocr2.py  Normal file
@@ -0,0 +1,202 @@
# Copyright 2022 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import unittest

from transformers.image_utils import SizeDict
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available

from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from transformers import GotOcr2ImageProcessor

if is_torchvision_available():
    from transformers import GotOcr2ImageProcessorFast


class GotOcr2ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        do_pad=False,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        super().__init__()
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
            "do_pad": self.do_pad,
        }

    def expected_output_image_shape(self, images):
        return self.num_channels, self.size["height"], self.size["width"]

    def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        return prepare_image_inputs(
            batch_size=self.batch_size,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            numpify=numpify,
            torchify=torchify,
        )

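# The tester above only assembles processor kwargs and dummy image inputs; the actual
# assertions run in the mixin-based test class below.
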
@require_torch
@require_vision
class GotOcr2ProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
    image_processing_class = GotOcr2ImageProcessor if is_vision_available() else None
    fast_image_processing_class = GotOcr2ImageProcessorFast if is_torchvision_available() else None

    def setUp(self):
        super().setUp()
        self.image_processor_tester = GotOcr2ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        for image_processing_class in self.image_processor_list:
            image_processor = image_processing_class(**self.image_processor_dict)
            self.assertTrue(hasattr(image_processor, "do_resize"))
            self.assertTrue(hasattr(image_processor, "size"))
            self.assertTrue(hasattr(image_processor, "do_normalize"))
            self.assertTrue(hasattr(image_processor, "image_mean"))
            self.assertTrue(hasattr(image_processor, "image_std"))
            self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_slow_fast_equivalence_crop_to_patches(self):
        dummy_image = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)[0]

        image_processor_slow = self.image_processing_class(**self.image_processor_dict, crop_to_patches=True)
        image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict, crop_to_patches=True)

        encoding_slow = image_processor_slow(dummy_image, return_tensors="pt")
        encoding_fast = image_processor_fast(dummy_image, return_tensors="pt")

        torch.testing.assert_close(encoding_slow.num_patches, encoding_fast.num_patches)
        self._assert_slow_fast_tensors_equivalence(encoding_slow.pixel_values, encoding_fast.pixel_values)

    def test_slow_fast_equivalence_batched_crop_to_patches(self):
        # Prepare image inputs so that we have two groups of images with equal resolution,
        # with a group of images with different resolutions in between
        dummy_images = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, torchify=True)
        dummy_images += self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
        dummy_images += self.image_processor_tester.prepare_image_inputs(equal_resolution=True, torchify=True)

        image_processor_slow = self.image_processing_class(**self.image_processor_dict, crop_to_patches=True)
        image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict, crop_to_patches=True)

        encoding_slow = image_processor_slow(dummy_images, return_tensors="pt")
        encoding_fast = image_processor_fast(dummy_images, return_tensors="pt")

        torch.testing.assert_close(encoding_slow.num_patches, encoding_fast.num_patches)
        self._assert_slow_fast_tensors_equivalence(encoding_slow.pixel_values, encoding_fast.pixel_values)

    def test_crop_to_patches(self):
        # test slow image processor
        image_processor = self.image_processor_list[0](**self.image_processor_dict)
        image = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, numpify=True)[0]
        processed_images = image_processor.crop_image_to_patches(
            image,
            min_patches=1,
            max_patches=6,
            use_thumbnail=True,
            patch_size={"height": 20, "width": 20},
        )
        self.assertEqual(len(processed_images), 5)
        self.assertEqual(processed_images[0].shape[:2], (20, 20))

        # test fast image processor (process batch)
        image_processor = self.image_processor_list[1](**self.image_processor_dict)
        image = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, torchify=True)[0]
        processed_images = image_processor.crop_image_to_patches(
            image.unsqueeze(0),
            min_patches=1,
            max_patches=6,
            use_thumbnail=True,
            patch_size=SizeDict(height=20, width=20),
        )
        self.assertEqual(len(processed_images[0]), 5)
        self.assertEqual(processed_images.shape[-2:], (20, 20))

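    # A hedged note on the patch counts asserted above and in the next test: the crop
    # logic appears to follow InternVL-style dynamic cropping. For a square image the
    # candidate grids are n x n with n * n <= max_patches, n is additionally capped so
    # that image_area > 0.5 * patch_area * n * n, and use_thumbnail adds one extra
    # image whenever more than one crop is produced. E.g. 100 x 100 with 20 x 20
    # patches and max_patches=200 gives n = 7, i.e. 49 crops + 1 thumbnail = 50.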
    def test_get_num_patches_without_images(self):
        for image_processing_class in self.image_processor_list:
            image_processing = image_processing_class(**self.image_processor_dict)
            num_patches = image_processing.get_number_of_image_patches(height=100, width=100, images_kwargs={})
            self.assertEqual(num_patches, 1)

            num_patches = image_processing.get_number_of_image_patches(
                height=300, width=500, images_kwargs={"crop_to_patches": False}
            )
            self.assertEqual(num_patches, 1)

            num_patches = image_processing.get_number_of_image_patches(
                height=20, width=20, images_kwargs={"crop_to_patches": True}
            )
            self.assertEqual(num_patches, 1)

            num_patches = image_processing.get_number_of_image_patches(
                height=60, width=60, images_kwargs={"crop_to_patches": True}
            )
            self.assertEqual(num_patches, 10)

            num_patches = image_processing.get_number_of_image_patches(
                height=100, width=100, images_kwargs={"crop_to_patches": True}
            )
            self.assertEqual(num_patches, 10)

            num_patches = image_processing.get_number_of_image_patches(
                height=100, width=100, images_kwargs={"crop_to_patches": True, "max_patches": 200}
            )
            self.assertEqual(num_patches, 50)
296  transformers/tests/models/got_ocr2/test_modeling_got_ocr2.py  Normal file
@@ -0,0 +1,296 @@
# Copyright 2024 The Qwen team, Alibaba Group and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch GotOcr2 model."""

import unittest

from transformers import (
    AutoProcessor,
    GotOcr2Config,
    is_torch_available,
    is_vision_available,
)
from transformers.testing_utils import cleanup, require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        GotOcr2ForConditionalGeneration,
        GotOcr2Model,
    )


if is_vision_available():
    from transformers.image_utils import load_image


class GotOcr2VisionText2TextModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        seq_length=7,
        num_channels=3,
        ignore_index=-100,
        image_size=64,
        image_token_index=1,
        model_type="got_ocr2",
        is_training=True,
        text_config={
            "model_type": "qwen2",
            "vocab_size": 99,
            "hidden_size": 128,
            "intermediate_size": 37,
            "num_hidden_layers": 2,
            "num_attention_heads": 4,
            "num_key_value_heads": 2,
            "output_channels": 64,
            "hidden_act": "silu",
            "max_position_embeddings": 512,
            "rope_theta": 10000,
            "mlp_ratio": 4,
            "tie_word_embeddings": True,
            "bos_token_id": 2,
            "eos_token_id": 3,
            "pad_token_id": 4,
        },
        vision_config={
            "num_hidden_layers": 2,
            "output_channels": 64,
            "hidden_act": "quick_gelu",
            "hidden_size": 32,
            "mlp_dim": 128,
            "num_attention_heads": 4,
            "patch_size": 2,
            "image_size": 64,
        },
    ):
        self.parent = parent
        self.ignore_index = ignore_index
        self.bos_token_id = text_config["bos_token_id"]
        self.eos_token_id = text_config["eos_token_id"]
        self.pad_token_id = text_config["pad_token_id"]
        self.image_token_index = image_token_index
        self.model_type = model_type
        self.text_config = text_config
        self.vision_config = vision_config
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.is_training = is_training
        self.num_image_tokens = 64
        self.seq_length = seq_length + self.num_image_tokens

        self.num_hidden_layers = text_config["num_hidden_layers"]
        self.vocab_size = text_config["vocab_size"]
        self.hidden_size = text_config["hidden_size"]
        self.num_attention_heads = text_config["num_attention_heads"]

    def get_config(self):
        return GotOcr2Config(
            text_config=self.text_config,
            vision_config=self.vision_config,
            model_type=self.model_type,
            image_token_index=self.image_token_index,
        )

    def prepare_config_and_inputs(self):
        config = self.get_config()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        return config, pixel_values

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)

        input_ids[input_ids == self.image_token_index] = self.pad_token_id
        input_ids[:, : self.num_image_tokens] = self.image_token_index

        inputs_dict = {
            "pixel_values": pixel_values,
            "input_ids": input_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict

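# A hedged note on the dummy inputs above: the random ids are first scrubbed of any
# accidental image_token_index hits, then the first num_image_tokens positions are
# overwritten with the image token, so the number of placeholders always matches the
# number of visual embeddings the model scatters into the text sequence.
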
@require_torch
class GotOcr2ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            GotOcr2Model,
            GotOcr2ForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "image-to-text": GotOcr2ForConditionalGeneration,
            "image-text-to-text": GotOcr2ForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = GotOcr2VisionText2TextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GotOcr2Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

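# The integration tests below download the released "stepfun-ai/GOT-OCR-2.0-hf"
# checkpoint, so they only run under @slow; most keep max_new_tokens tiny and check
# only the first few generated tokens.
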
@require_torch
class GotOcr2IntegrationTest(unittest.TestCase):
    def setUp(self):
        self.processor = AutoProcessor.from_pretrained("stepfun-ai/GOT-OCR-2.0-hf")

    def tearDown(self):
        cleanup(torch_device, gc_collect=True)

    @slow
    def test_small_model_integration_test_got_ocr_stop_strings(self):
        model_id = "stepfun-ai/GOT-OCR-2.0-hf"
        model = GotOcr2ForConditionalGeneration.from_pretrained(model_id, device_map=torch_device)
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/fixtures_ocr/resolve/main/iam_picture.jpeg"
        )

        inputs = self.processor(image, return_tensors="pt").to(torch_device)
        generate_ids = model.generate(
            **inputs,
            do_sample=False,
            num_beams=1,
            tokenizer=self.processor.tokenizer,
            stop_strings="<|im_end|>",
            max_new_tokens=4096,
        )
        decoded_output = self.processor.decode(
            generate_ids[0, inputs["input_ids"].shape[1] :], skip_special_tokens=True
        )
        expected_output = "industre"
        self.assertEqual(decoded_output, expected_output)

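    # generate() returns the prompt followed by the new tokens, so each test in this
    # class slices off the first input_ids.shape[1] positions before decoding,
    # comparing only the freshly generated text.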
    @slow
    def test_small_model_integration_test_got_ocr_format(self):
        model_id = "stepfun-ai/GOT-OCR-2.0-hf"
        model = GotOcr2ForConditionalGeneration.from_pretrained(model_id, device_map=torch_device)
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/fixtures_got_ocr/resolve/main/image_ocr.jpg"
        )

        inputs = self.processor(image, return_tensors="pt", format=True).to(torch_device)
        generate_ids = model.generate(**inputs, do_sample=False, num_beams=1, max_new_tokens=4)
        decoded_output = self.processor.decode(
            generate_ids[0, inputs["input_ids"].shape[1] :], skip_special_tokens=True
        )
        expected_output = "\\title{\nR"
        self.assertEqual(decoded_output, expected_output)

    @slow
    def test_small_model_integration_test_got_ocr_fine_grained(self):
        model_id = "stepfun-ai/GOT-OCR-2.0-hf"
        model = GotOcr2ForConditionalGeneration.from_pretrained(model_id, device_map=torch_device)
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/fixtures_got_ocr/resolve/main/multi_box.png"
        )

        inputs = self.processor(image, return_tensors="pt", color="green").to(torch_device)
        generate_ids = model.generate(**inputs, do_sample=False, num_beams=1, max_new_tokens=4)
        decoded_output = self.processor.decode(
            generate_ids[0, inputs["input_ids"].shape[1] :], skip_special_tokens=True
        )
        expected_output = "You should keep in"
        self.assertEqual(decoded_output, expected_output)

    @slow
    def test_small_model_integration_test_got_ocr_crop_to_patches(self):
        model_id = "stepfun-ai/GOT-OCR-2.0-hf"
        model = GotOcr2ForConditionalGeneration.from_pretrained(model_id, device_map=torch_device)
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/fixtures_got_ocr/resolve/main/one_column.png"
        )

        inputs = self.processor(image, return_tensors="pt", crop_to_patches=True).to(torch_device)
        generate_ids = model.generate(**inputs, do_sample=False, num_beams=1, max_new_tokens=4)
        decoded_output = self.processor.decode(
            generate_ids[0, inputs["input_ids"].shape[1] :], skip_special_tokens=True
        )
        expected_output = "on developing architectural improvements"
        self.assertEqual(decoded_output, expected_output)

    @slow
    def test_small_model_integration_test_got_ocr_multi_pages(self):
        model_id = "stepfun-ai/GOT-OCR-2.0-hf"
        model = GotOcr2ForConditionalGeneration.from_pretrained(model_id, device_map=torch_device)
        image1 = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/fixtures_got_ocr/resolve/main/one_column.png"
        )
        image2 = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/fixtures_got_ocr/resolve/main/multi_box.png"
        )

        inputs = self.processor([image1, image2], return_tensors="pt", multi_page=True).to(torch_device)
        generate_ids = model.generate(**inputs, do_sample=False, num_beams=1, max_new_tokens=4)
        decoded_output = self.processor.decode(
            generate_ids[0, inputs["input_ids"].shape[1] :], skip_special_tokens=True
        )
        expected_output = "on developing architectural improvements"
        self.assertEqual(decoded_output, expected_output)

    @slow
    def test_small_model_integration_test_got_ocr_batched(self):
        model_id = "stepfun-ai/GOT-OCR-2.0-hf"
        model = GotOcr2ForConditionalGeneration.from_pretrained(model_id, device_map=torch_device)
        image1 = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/fixtures_got_ocr/resolve/main/multi_box.png"
        )
        image2 = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/fixtures_got_ocr/resolve/main/image_ocr.jpg"
        )

        inputs = self.processor([image1, image2], return_tensors="pt").to(torch_device)
        generate_ids = model.generate(**inputs, do_sample=False, num_beams=1, max_new_tokens=4)
        decoded_output = self.processor.batch_decode(
            generate_ids[:, inputs["input_ids"].shape[1] :], skip_special_tokens=True
        )
        expected_output = ["Reducing the number", "R&D QUALITY"]
        self.assertEqual(decoded_output, expected_output)
104  transformers/tests/models/got_ocr2/test_processing_got_ocr2.py  Normal file
@@ -0,0 +1,104 @@
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import shutil
import tempfile
import unittest

from transformers import AutoProcessor, GotOcr2Processor, PreTrainedTokenizerFast
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available

from ...test_processing_common import ProcessorTesterMixin


if is_vision_available():
    from transformers import GotOcr2ImageProcessor


@require_vision
class GotOcr2ProcessorTest(ProcessorTesterMixin, unittest.TestCase):
    processor_class = GotOcr2Processor

    @classmethod
    def setUpClass(cls):
        cls.tmpdirname = tempfile.mkdtemp()

        image_processor = GotOcr2ImageProcessor()
        tokenizer = PreTrainedTokenizerFast.from_pretrained("stepfun-ai/GOT-OCR-2.0-hf")
        processor_kwargs = {}
        processor = GotOcr2Processor(image_processor, tokenizer, **processor_kwargs)
        processor.save_pretrained(cls.tmpdirname)
        cls.image_token = processor.img_pad_token

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdirname, ignore_errors=True)

    def test_ocr_queries(self):
        processor = self.get_processor()
        image_input = self.prepare_image_inputs()
        inputs = processor(image_input, return_tensors="pt")
        self.assertEqual(inputs["input_ids"].shape, (1, 286))
        self.assertEqual(inputs["pixel_values"].shape, (1, 3, 384, 384))

        inputs = processor(image_input, return_tensors="pt", format=True)
        self.assertEqual(inputs["input_ids"].shape, (1, 288))
        self.assertEqual(inputs["pixel_values"].shape, (1, 3, 384, 384))

        inputs = processor(image_input, return_tensors="pt", color="red")
        self.assertEqual(inputs["input_ids"].shape, (1, 290))
        self.assertEqual(inputs["pixel_values"].shape, (1, 3, 384, 384))

        inputs = processor(image_input, return_tensors="pt", box=[0, 0, 100, 100])
        self.assertEqual(inputs["input_ids"].shape, (1, 303))
        self.assertEqual(inputs["pixel_values"].shape, (1, 3, 384, 384))

        inputs = processor([image_input, image_input], return_tensors="pt", multi_page=True, format=True)
        self.assertEqual(inputs["input_ids"].shape, (1, 547))
        self.assertEqual(inputs["pixel_values"].shape, (2, 3, 384, 384))

        inputs = processor(image_input, return_tensors="pt", crop_to_patches=True, max_patches=6)
        self.assertEqual(inputs["input_ids"].shape, (1, 1826))
        self.assertEqual(inputs["pixel_values"].shape, (7, 3, 384, 384))

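    # A hedged reading of the shapes asserted in test_ocr_queries above: each image
    # slice appears to contribute 256 image-placeholder ids (286 = 256 + 30 prompt
    # tokens; 547 = 2 * 256 + 35; 1826 = 7 * 256 + 34), format/color/box prepend a
    # few instruction tokens, and crop_to_patches with max_patches=6 yields 6 crops
    # plus a global thumbnail, hence 7 pixel_values slices.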
    def test_processor_text_has_no_visual(self):
        # Overwritten: requires the `multi_page` kwarg to process nested vision inputs
        processor = self.get_processor()

        text = self.prepare_text_inputs(batch_size=3, modalities="image")
        image_inputs = self.prepare_image_inputs(batch_size=3)
        processing_kwargs = {"return_tensors": "pt", "padding": True, "multi_page": True}

        # Call with a nested list of vision inputs
        image_inputs_nested = [[image] if not isinstance(image, list) else image for image in image_inputs]
        inputs_dict_nested = {"text": text, "images": image_inputs_nested}
        inputs = processor(**inputs_dict_nested, **processing_kwargs)
        self.assertTrue(self.text_input_name in inputs)

        # Call with one of the samples having no associated vision input
        plain_text = "lower newer"
        image_inputs_nested[0] = []
        text[0] = plain_text
        inputs_dict_no_vision = {"text": text, "images": image_inputs_nested}
        inputs_nested = processor(**inputs_dict_no_vision, **processing_kwargs)
        self.assertListEqual(
            inputs[self.text_input_name][1:].tolist(), inputs_nested[self.text_input_name][1:].tolist()
        )