init
0
transformers/tests/models/layoutlmv2/__init__.py
Normal file
@@ -0,0 +1,223 @@
# Copyright 2021 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import pytest
import requests
from packaging import version

from transformers.testing_utils import (
    require_pytesseract,
    require_torch,
    require_torch_accelerator,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import (
    is_pytesseract_available,
    is_torch_available,
    is_torchvision_available,
)

from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_pytesseract_available():
    from PIL import Image

    from transformers import LayoutLMv2ImageProcessor

if is_torchvision_available():
    from transformers import LayoutLMv2ImageProcessorFast


class LayoutLMv2ImageProcessingTester:
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}

    def expected_output_image_shape(self, images):
        return self.num_channels, self.size["height"], self.size["width"]

    def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        return prepare_image_inputs(
            batch_size=self.batch_size,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            numpify=numpify,
            torchify=torchify,
        )


@require_torch
@require_pytesseract
class LayoutLMv2ImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv2ImageProcessor if is_pytesseract_available() else None
    fast_image_processing_class = (
        LayoutLMv2ImageProcessorFast if (is_torchvision_available() and is_pytesseract_available()) else None
    )

    def setUp(self):
        super().setUp()
        self.image_processor_tester = LayoutLMv2ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        for image_processing_class in self.image_processor_list:
            image_processing = image_processing_class(**self.image_processor_dict)
            self.assertTrue(hasattr(image_processing, "do_resize"))
            self.assertTrue(hasattr(image_processing, "size"))
            self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        for image_processing_class in self.image_processor_list:
            image_processor = image_processing_class.from_dict(self.image_processor_dict)
            self.assertEqual(image_processor.size, {"height": 18, "width": 18})

            image_processor = image_processing_class.from_dict(self.image_processor_dict, size=42)
            self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    @unittest.skip(reason="Tesseract version is not correct in ci. @Arthur FIXME")
    def test_layoutlmv2_integration_test(self):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        for image_processing_class in self.image_processor_list:
            # with apply_OCR = True
            image_processing = image_processing_class()

            image = ds[0]["image"]

            encoding = image_processing(image, return_tensors="pt")

            self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
            self.assertEqual(len(encoding.words), len(encoding.boxes))

            # fmt: off
            # the words and boxes were obtained with Tesseract 5.3.0
            expected_words = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
            expected_boxes = [[[141, 57, 210, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [695, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
            # fmt: on

            self.assertListEqual(encoding.words, expected_words)
            self.assertListEqual(encoding.boxes, expected_boxes)

            # with apply_OCR = False
            image_processing = image_processing_class(apply_ocr=False)

            encoding = image_processing(image, return_tensors="pt")

            self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))

    @require_vision
    @require_torch
    def test_slow_fast_equivalence(self):
        if not self.test_slow_image_processor or not self.test_fast_image_processor:
            self.skipTest(reason="Skipping slow/fast equivalence test")

        if self.image_processing_class is None or self.fast_image_processing_class is None:
            self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")

        dummy_image = Image.open(
            requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
        )
        image_processor_slow = self.image_processing_class(**self.image_processor_dict)
        image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)

        encoding_slow = image_processor_slow(dummy_image, return_tensors="pt")
        encoding_fast = image_processor_fast(dummy_image, return_tensors="pt")
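        # pixel values here are integer-valued in [0, 255] (see the int8 note on the compile test below);
        # casting to float and rescaling to [0, 1] makes the closeness check tolerance scale-independent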
        self._assert_slow_fast_tensors_equivalence(
            encoding_slow.pixel_values.float() / 255, encoding_fast.pixel_values.float() / 255
        )

    @require_vision
    @require_torch
    def test_slow_fast_equivalence_batched(self):
        if not self.test_slow_image_processor or not self.test_fast_image_processor:
            self.skipTest(reason="Skipping slow/fast equivalence test")

        if self.image_processing_class is None or self.fast_image_processing_class is None:
            self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")

        if hasattr(self.image_processor_tester, "do_center_crop") and self.image_processor_tester.do_center_crop:
            self.skipTest(
                reason="Skipping as do_center_crop is True and center_crop functions are not equivalent for fast and slow processors"
            )

        dummy_images = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
        image_processor_slow = self.image_processing_class(**self.image_processor_dict)
        image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)

        encoding_slow = image_processor_slow(dummy_images, return_tensors="pt")
        encoding_fast = image_processor_fast(dummy_images, return_tensors="pt")

        self._assert_slow_fast_tensors_equivalence(
            encoding_slow.pixel_values.float() / 255, encoding_fast.pixel_values.float() / 255
        )

    # Overriding as we can't use torch.testing.assert_close on int8 tensors
    @slow
    @require_torch_accelerator
    @require_vision
    @pytest.mark.torch_compile_test
    def test_can_compile_fast_image_processor(self):
        if self.fast_image_processing_class is None:
            self.skipTest("Skipping compilation test as fast image processor is not defined")
        if version.parse(torch.__version__) < version.parse("2.3"):
            self.skipTest(reason="This test requires torch >= 2.3 to run.")

        torch.compiler.reset()
        input_image = torch.randint(0, 255, (3, 224, 224), dtype=torch.uint8)
        image_processor = self.fast_image_processing_class(**self.image_processor_dict)
        output_eager = image_processor(input_image, device=torch_device, return_tensors="pt")

        image_processor = torch.compile(image_processor, mode="reduce-overhead")
        output_compiled = image_processor(input_image, device=torch_device, return_tensors="pt")

        self._assert_slow_fast_tensors_equivalence(
            output_eager.pixel_values.float() / 255, output_compiled.pixel_values.float() / 255
        )
566
transformers/tests/models/layoutlmv2/test_modeling_layoutlmv2.py
Normal file
@@ -0,0 +1,566 @@
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch LayoutLMv2 model."""

import unittest

from transformers.testing_utils import (
    require_detectron2,
    require_non_xpu,
    require_torch,
    require_torch_multi_gpu,
    slow,
    torch_device,
)
from transformers.utils import is_detectron2_available, is_torch_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    import torch.nn.functional as F

    from transformers import (
        LayoutLMv2Config,
        LayoutLMv2ForQuestionAnswering,
        LayoutLMv2ForSequenceClassification,
        LayoutLMv2ForTokenClassification,
        LayoutLMv2Model,
    )

if is_detectron2_available():
    from detectron2.structures.image_list import ImageList


class LayoutLMv2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        image_feature_pool_shape=[7, 7, 32],
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.image_feature_pool_shape = image_feature_pool_shape
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        detectron2_config = LayoutLMv2Config.get_default_detectron2_config()
        # We need to make the model smaller
        detectron2_config["MODEL.RESNETS.DEPTH"] = 50
        detectron2_config["MODEL.RESNETS.RES2_OUT_CHANNELS"] = 4
        detectron2_config["MODEL.RESNETS.STEM_OUT_CHANNELS"] = 4
        detectron2_config["MODEL.FPN.OUT_CHANNELS"] = 32
        detectron2_config["MODEL.RESNETS.NUM_GROUPS"] = 1
        self.detectron2_config = detectron2_config

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
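        # (legal means x1 >= x0 and y1 >= y0; the loop below swaps coordinates wherever
        # the random draw produced an inverted box)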
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        image = ImageList(
            torch.zeros(self.batch_size, self.num_channels, self.image_size, self.image_size, device=torch_device),
            self.image_size,
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = LayoutLMv2Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            image_feature_pool_shape=self.image_feature_pool_shape,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            detectron2_config_args=self.detectron2_config,
        )

        return config, input_ids, bbox, image, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(
        self, config, input_ids, bbox, image, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv2Model(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, bbox=bbox, image=image, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, image=image, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, image=image)

        # LayoutLMv2 has a different expected sequence length: the visual tokens are appended to the text tokens
        expected_seq_len = self.seq_length + self.image_feature_pool_shape[0] * self.image_feature_pool_shape[1]
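        # e.g. with the defaults above: seq_length 7 + a 7 x 7 pooled visual feature map = 7 + 49 = 56 positions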
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, image, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv2ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            image=image,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, image, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv2ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            image=image,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, image, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv2ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            image=image,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            image,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "image": image,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_non_xpu
@require_torch
@require_detectron2
class LayoutLMv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = True
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMv2Model,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2ForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMv2ForQuestionAnswering, "feature-extraction": LayoutLMv2Model}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = LayoutLMv2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            "LayoutLMV2 and its dependency `detectron2` have some layers using `add_module` which doesn't work well"
            " with `nn.DataParallel`"
        )
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # LayoutLMv2 has a different expected sequence length
        expected_seq_len = (
            self.model_tester.seq_length
            + self.model_tester.image_feature_pool_shape[0] * self.model_tester.image_feature_pool_shape[1]
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class._from_config(config, attn_implementation="eager")
            config = model.config
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, expected_seq_len, expected_seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, expected_seq_len, expected_seq_len],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            # LayoutLMv2 has a different expected sequence length
            expected_seq_len = (
                self.model_tester.seq_length
                + self.model_tester.image_feature_pool_shape[0] * self.model_tester.image_feature_pool_shape[1]
            )

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [expected_seq_len, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    @slow
    def test_model_from_pretrained(self):
        model_name = "microsoft/layoutlmv2-base-uncased"
        model = LayoutLMv2Model.from_pretrained(model_name)
        self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "backbone" in name or "visual_segment_embedding" in name:
                    continue

                if param.requires_grad:
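                    # with a zero-init config, trainable weights should be exactly 0.0 or 1.0;
                    # rounding the mean to 9 decimals absorbs float noise before the comparison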
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_batching_equivalence(self):
        def equivalence(tensor1, tensor2):
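            # cosine distance between the flattened outputs; 0.0 means they point in exactly the same direction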
            return 1.0 - F.cosine_similarity(tensor1.float().flatten(), tensor2.float().flatten(), dim=0, eps=0)

        def recursive_check(batched_object, single_row_object, model_name, key):
            if isinstance(batched_object, (list, tuple)):
                for batched_object_value, single_row_object_value in zip(batched_object, single_row_object):
                    recursive_check(batched_object_value, single_row_object_value, model_name, key)
            elif batched_object is None:
                return
            else:
                batched_row = batched_object[:1]
                self.assertFalse(
                    torch.isnan(batched_row).any(), f"Batched output has `nan` in {model_name} for key={key}"
                )
                self.assertFalse(
                    torch.isinf(batched_row).any(), f"Batched output has `inf` in {model_name} for key={key}"
                )
                self.assertFalse(
                    torch.isnan(single_row_object).any(), f"Single row output has `nan` in {model_name} for key={key}"
                )
                self.assertFalse(
                    torch.isinf(single_row_object).any(), f"Single row output has `inf` in {model_name} for key={key}"
                )
                self.assertTrue(
                    (equivalence(batched_row, single_row_object)) <= 1e-03,
                    msg=(
                        f"Batched and Single row outputs are not equal in {model_name} for key={key}. "
                        f"Difference={equivalence(batched_row, single_row_object)}."
                    ),
                )

        config, batched_input = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            config.output_hidden_states = True

            model_name = model_class.__name__
            batched_input_prepared = self._prepare_for_class(batched_input, model_class)
            model = model_class(config).to(torch_device).eval()
            batch_size = self.model_tester.batch_size

            single_row_input = {}
            for key, value in batched_input_prepared.items():
                if isinstance(value, torch.Tensor) and value.shape[0] % batch_size == 0:
                    single_batch_shape = value.shape[0] // batch_size
                    single_row_input[key] = value[:single_batch_shape]
                elif hasattr(value, "tensor"):
                    # LayoutLMv2 wraps images in detectron2's ImageList instead of raw pixel values
                    # (needed for torchscript), so slice its underlying tensor; compute the slice
                    # size here rather than relying on a value left over from a previous iteration
                    single_row_input[key] = value.tensor[: value.tensor.shape[0] // batch_size]

            with torch.no_grad():
                model_batched_output = model(**batched_input_prepared)
                model_row_output = model(**single_row_input)

            for key in model_batched_output:
                recursive_check(model_batched_output[key], model_row_output[key], model_name, key)


def prepare_layoutlmv2_batch_inputs():
    # Here we prepare a batch of 2 sequences to test a LayoutLMv2 forward pass on:
    # fmt: off
    input_ids = torch.tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]]) # noqa: E231
    bbox = torch.tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]]) # noqa: E231
    image = ImageList(torch.randn((2,3,224,224)), image_sizes=[(224,224), (224,224)]) # noqa: E231
    attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],]) # noqa: E231
    token_type_ids = torch.tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]]) # noqa: E231
    # fmt: on

    return input_ids, bbox, image, attention_mask, token_type_ids


@require_torch
@require_detectron2
class LayoutLMv2ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = LayoutLMv2Model.from_pretrained("microsoft/layoutlmv2-base-uncased").to(torch_device)

        (
            input_ids,
            bbox,
            image,
            attention_mask,
            token_type_ids,
        ) = prepare_layoutlmv2_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            image=image.to(torch_device),
            attention_mask=attention_mask.to(torch_device),
            token_type_ids=token_type_ids.to(torch_device),
        )

        # verify the sequence output
        expected_shape = torch.Size(
            (
                2,
                input_ids.shape[1]
                + model.config.image_feature_pool_shape[0] * model.config.image_feature_pool_shape[1],
                model.config.hidden_size,
            )
        )
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.1087, 0.0727, -0.3075], [0.0799, -0.0427, -0.0751], [-0.0367, 0.0480, -0.1358]], device=torch_device
        )
        torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-3, atol=1e-3)

        # verify the pooled output
        expected_shape = torch.Size((2, model.config.hidden_size))
        self.assertEqual(outputs.pooler_output.shape, expected_shape)
@@ -0,0 +1,451 @@
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os
import shutil
import tempfile
import unittest
from functools import cached_property

from transformers import PreTrainedTokenizer, PreTrainedTokenizerBase, PreTrainedTokenizerFast
from transformers.models.layoutlmv2 import LayoutLMv2Processor, LayoutLMv2Tokenizer, LayoutLMv2TokenizerFast
from transformers.models.layoutlmv2.tokenization_layoutlmv2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pytesseract, require_tokenizers, require_torch, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pytesseract_available

from ...test_processing_common import ProcessorTesterMixin


if is_pytesseract_available():
    from transformers import LayoutLMv2ImageProcessor


@require_pytesseract
@require_tokenizers
class LayoutLMv2ProcessorTest(ProcessorTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMv2Tokenizer
    rust_tokenizer_class = LayoutLMv2TokenizerFast
    processor_class = LayoutLMv2Processor

    def setUp(self):
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]

        image_processor_map = {
            "do_resize": True,
            "size": 224,
            "apply_ocr": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        self.image_processing_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processing_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(image_processor_map) + "\n")

    def get_tokenizer(self, **kwargs) -> PreTrainedTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs) -> PreTrainedTokenizerFast:
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_tokenizers(self, **kwargs) -> list[PreTrainedTokenizerBase]:
        return [self.get_tokenizer(**kwargs), self.get_rust_tokenizer(**kwargs)]

    def get_image_processor(self, **kwargs):
        return LayoutLMv2ImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            processor = LayoutLMv2Processor(image_processor=image_processor, tokenizer=tokenizer)

            processor.save_pretrained(self.tmpdirname)
            processor = LayoutLMv2Processor.from_pretrained(self.tmpdirname)

            self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
            self.assertIsInstance(processor.tokenizer, (LayoutLMv2Tokenizer, LayoutLMv2TokenizerFast))

            self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
            self.assertIsInstance(processor.image_processor, LayoutLMv2ImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = LayoutLMv2Processor(image_processor=self.get_image_processor(), tokenizer=self.get_tokenizer())
        processor.save_pretrained(self.tmpdirname)

        # slow tokenizer
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_resize=False, size=30)

        processor = LayoutLMv2Processor.from_pretrained(
            self.tmpdirname, use_fast=False, bos_token="(BOS)", eos_token="(EOS)", do_resize=False, size=30
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, LayoutLMv2Tokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, LayoutLMv2ImageProcessor)

        # fast tokenizer
        tokenizer_add_kwargs = self.get_rust_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_resize=False, size=30)

        processor = LayoutLMv2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_resize=False, size=30
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, LayoutLMv2TokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, LayoutLMv2ImageProcessor)

    @slow
    def test_overflowing_tokens(self):
        # In the case of overflowing tokens, test that we still have 1-to-1 mapping between the images and
        # input_ids (sequences that are too long are broken down into multiple sequences).

        from datasets import load_dataset

        # set up
        datasets = load_dataset("nielsr/funsd")
        processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased", revision="no_ocr")

        def preprocess_data(examples):
            images = [image.convert("RGB") for image in examples["image"]]
            words = examples["words"]
            boxes = examples["bboxes"]
            word_labels = examples["ner_tags"]
            encoded_inputs = processor(
                images,
                words,
                boxes=boxes,
                word_labels=word_labels,
                padding="max_length",
                truncation=True,
                return_overflowing_tokens=True,
                stride=50,
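                # each overflowing window re-includes the last 50 tokens of the previous one,
                # so words at a window boundary keep surrounding context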
                return_offsets_mapping=True,
                return_tensors="pt",
            )
            return encoded_inputs

        train_data = preprocess_data(datasets["train"])

        self.assertEqual(len(train_data["image"]), len(train_data["input_ids"]))


# different use cases tests
@require_torch
@require_pytesseract
class LayoutLMv2ProcessorIntegrationTests(unittest.TestCase):
    @cached_property
    def get_images(self):
        # we verify our implementation on 2 document images from the DocVQA dataset
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")
        return ds[0]["image"].convert("RGB"), ds[1]["image"].convert("RGB")

    @cached_property
    def get_tokenizers(self):
        slow_tokenizer = LayoutLMv2Tokenizer.from_pretrained("microsoft/layoutlmv2-base-uncased")
        fast_tokenizer = LayoutLMv2TokenizerFast.from_pretrained("microsoft/layoutlmv2-base-uncased")
        return [slow_tokenizer, fast_tokenizer]

    @slow
    def test_processor_case_1(self):
        # case 1: document image classification (training, inference) + token classification (inference), apply_ocr=True

        image_processor = LayoutLMv2ImageProcessor()
        tokenizers = self.get_tokenizers
        images = self.get_images

        for tokenizer in tokenizers:
            processor = LayoutLMv2Processor(image_processor=image_processor, tokenizer=tokenizer)

            # not batched
            input_image_proc = image_processor(images[0], return_tensors="pt")
            input_processor = processor(images[0], return_tensors="pt")

            # verify keys
            expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"]
            actual_keys = sorted(input_processor.keys())
            self.assertListEqual(actual_keys, expected_keys)

            # verify image
            self.assertAlmostEqual(input_image_proc["pixel_values"].sum(), input_processor["image"].sum(), delta=1e-2)

            # verify input_ids
            # this was obtained with Tesseract 4.1.1
            expected_decoding = "[CLS] 11 : 14 to 11 : 39 a. m 11 : 39 to 11 : 44 a. m. 11 : 44 a. m. to 12 : 25 p. m. 12 : 25 to 12 : 58 p. m. 12 : 58 to 4 : 00 p. m. 2 : 00 to 5 : 00 p. m. coffee break coffee will be served for men and women in the lobby adjacent to exhibit area. please move into exhibit area. ( exhibits open ) trrf general session ( part | ) presiding : lee a. waller trrf vice president “ introductory remarks ” lee a. waller, trrf vice presi - dent individual interviews with trrf public board members and sci - entific advisory council mem - bers conducted by trrf treasurer philip g. kuehn to get answers which the public refrigerated warehousing industry is looking for. plus questions from the floor. dr. emil m. mrak, university of cal - ifornia, chairman, trrf board ; sam r. cecil, university of georgia college of agriculture ; dr. stanley charm, tufts university school of medicine ; dr. robert h. cotton, itt continental baking company ; dr. owen fennema, university of wis - consin ; dr. robert e. hardenburg, usda. questions and answers exhibits open capt. jack stoney room trrf scientific advisory council meeting ballroom foyer [SEP]" # fmt: skip
            decoding = processor.decode(input_processor.input_ids.squeeze().tolist())
            self.assertSequenceEqual(decoding, expected_decoding)

            # batched
            input_image_proc = image_processor(images, return_tensors="pt")
            input_processor = processor(images, padding=True, return_tensors="pt")

            # verify keys
            expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"]
            actual_keys = sorted(input_processor.keys())
            self.assertListEqual(actual_keys, expected_keys)

            # verify images
            self.assertAlmostEqual(input_image_proc["pixel_values"].sum(), input_processor["image"].sum(), delta=1e-2)

            # verify input_ids
            # this was obtained with Tesseract 4.1.1
            expected_decoding = "[CLS] 7 itc limited report and accounts 2013 itc ’ s brands : an asset for the nation the consumer needs and aspirations they fulfil, the benefit they generate for millions across itc ’ s value chains, the future - ready capabilities that support them, and the value that they create for the country, have made itc ’ s brands national assets, adding to india ’ s competitiveness. it is itc ’ s aspiration to be the no 1 fmcg player in the country, driven by its new fmcg businesses. a recent nielsen report has highlighted that itc's new fmcg businesses are the fastest growing among the top consumer goods companies operating in india. itc takes justifiable pride that, along with generating economic value, these celebrated indian brands also drive the creation of larger societal capital through the virtuous cycle of sustainable and inclusive growth. di wills * ; love delightfully soft skin? aia ans source : https : / / www. industrydocuments. ucsf. edu / docs / snbx0223 [SEP] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD]" # fmt: skip
            decoding = processor.decode(input_processor.input_ids[1].tolist())
            self.assertSequenceEqual(decoding, expected_decoding)

    @slow
    def test_processor_case_2(self):
        # case 2: document image classification (training, inference) + token classification (inference), apply_ocr=False

        image_processor = LayoutLMv2ImageProcessor(apply_ocr=False)
        tokenizers = self.get_tokenizers
        images = self.get_images

        for tokenizer in tokenizers:
            processor = LayoutLMv2Processor(image_processor=image_processor, tokenizer=tokenizer)

            # not batched
            words = ["hello", "world"]
            boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]
            input_processor = processor(images[0], words, boxes=boxes, return_tensors="pt")

            # verify keys
            expected_keys = ["input_ids", "bbox", "token_type_ids", "attention_mask", "image"]
            actual_keys = list(input_processor.keys())
            for key in expected_keys:
                self.assertIn(key, actual_keys)

            # verify input_ids
            expected_decoding = "[CLS] hello world [SEP]"
            decoding = processor.decode(input_processor.input_ids.squeeze().tolist())
            self.assertSequenceEqual(decoding, expected_decoding)

            # batched
            words = [["hello", "world"], ["my", "name", "is", "niels"]]
            boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3]]]
            input_processor = processor(images, words, boxes=boxes, padding=True, return_tensors="pt")

            # verify keys
            expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"]
            actual_keys = sorted(input_processor.keys())
            self.assertListEqual(actual_keys, expected_keys)

            # verify input_ids
            expected_decoding = "[CLS] hello world [SEP] [PAD] [PAD] [PAD]"
            decoding = processor.decode(input_processor.input_ids[0].tolist())
            self.assertSequenceEqual(decoding, expected_decoding)

            # verify bbox
            expected_bbox = [
                [0, 0, 0, 0],
                [3, 2, 5, 1],
                [6, 7, 4, 2],
                [3, 9, 2, 4],
                [1, 1, 2, 3],
                [1, 1, 2, 3],
                [1000, 1000, 1000, 1000],
            ]
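            # [0, 0, 0, 0] is the [CLS] box, "niels" splits into two subwords that share box [1, 1, 2, 3],
            # and [1000, 1000, 1000, 1000] is the [SEP] box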
            self.assertListEqual(input_processor.bbox[1].tolist(), expected_bbox)

    @slow
    def test_processor_case_3(self):
        # case 3: token classification (training), apply_ocr=False

        image_processor = LayoutLMv2ImageProcessor(apply_ocr=False)
        tokenizers = self.get_tokenizers
        images = self.get_images

        for tokenizer in tokenizers:
            processor = LayoutLMv2Processor(image_processor=image_processor, tokenizer=tokenizer)

            # not batched
            words = ["weirdly", "world"]
            boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]
            word_labels = [1, 2]
            input_processor = processor(images[0], words, boxes=boxes, word_labels=word_labels, return_tensors="pt")

            # verify keys
            expected_keys = ["attention_mask", "bbox", "image", "input_ids", "labels", "token_type_ids"]
            actual_keys = sorted(input_processor.keys())
            self.assertListEqual(actual_keys, expected_keys)

            # verify input_ids
            expected_decoding = "[CLS] weirdly world [SEP]"
            decoding = processor.decode(input_processor.input_ids.squeeze().tolist())
            self.assertSequenceEqual(decoding, expected_decoding)

            # verify labels
            expected_labels = [-100, 1, -100, 2, -100]
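            # -100 is ignored by the loss: [CLS], [SEP] and the subword continuation of "weirdly"
            # get -100, so only the first subword of each word carries its label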
            self.assertListEqual(input_processor.labels.squeeze().tolist(), expected_labels)

            # batched
            words = [["hello", "world"], ["my", "name", "is", "niels"]]
            boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3]]]
            word_labels = [[1, 2], [6, 3, 10, 2]]
            input_processor = processor(
                images, words, boxes=boxes, word_labels=word_labels, padding=True, return_tensors="pt"
            )

            # verify keys
            expected_keys = ["attention_mask", "bbox", "image", "input_ids", "labels", "token_type_ids"]
            actual_keys = sorted(input_processor.keys())
            self.assertListEqual(actual_keys, expected_keys)

            # verify input_ids
            expected_decoding = "[CLS] my name is niels [SEP]"
            decoding = processor.decode(input_processor.input_ids[1].tolist())
            self.assertSequenceEqual(decoding, expected_decoding)

            # verify bbox
            expected_bbox = [
                [0, 0, 0, 0],
                [3, 2, 5, 1],
                [6, 7, 4, 2],
                [3, 9, 2, 4],
                [1, 1, 2, 3],
                [1, 1, 2, 3],
                [1000, 1000, 1000, 1000],
            ]
            self.assertListEqual(input_processor.bbox[1].tolist(), expected_bbox)

            # verify labels
            expected_labels = [-100, 6, 3, 10, 2, -100, -100]
            self.assertListEqual(input_processor.labels[1].tolist(), expected_labels)

    @slow
    def test_processor_case_4(self):
        # case 4: visual question answering (inference), apply_ocr=True

        image_processor = LayoutLMv2ImageProcessor()
        tokenizers = self.get_tokenizers
        images = self.get_images

        for tokenizer in tokenizers:
            processor = LayoutLMv2Processor(image_processor=image_processor, tokenizer=tokenizer)

            # not batched
            question = "What's his name?"
            input_processor = processor(images[0], question, return_tensors="pt")

            # verify keys
            expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"]
            actual_keys = sorted(input_processor.keys())
            self.assertListEqual(actual_keys, expected_keys)

            # verify input_ids
            # this was obtained with Tesseract 4.1.1
            expected_decoding = "[CLS] what's his name? [SEP] 11 : 14 to 11 : 39 a. m 11 : 39 to 11 : 44 a. m. 11 : 44 a. m. to 12 : 25 p. m. 12 : 25 to 12 : 58 p. m. 12 : 58 to 4 : 00 p. m. 2 : 00 to 5 : 00 p. m. coffee break coffee will be served for men and women in the lobby adjacent to exhibit area. please move into exhibit area. ( exhibits open ) trrf general session ( part | ) presiding : lee a. waller trrf vice president “ introductory remarks ” lee a. waller, trrf vice presi - dent individual interviews with trrf public board members and sci - entific advisory council mem - bers conducted by trrf treasurer philip g. kuehn to get answers which the public refrigerated warehousing industry is looking for. plus questions from the floor. dr. emil m. mrak, university of cal - ifornia, chairman, trrf board ; sam r. cecil, university of georgia college of agriculture ; dr. stanley charm, tufts university school of medicine ; dr. robert h. cotton, itt continental baking company ; dr. owen fennema, university of wis - consin ; dr. robert e. hardenburg, usda. questions and answers exhibits open capt. jack stoney room trrf scientific advisory council meeting ballroom foyer [SEP]" # fmt: skip
            decoding = processor.decode(input_processor.input_ids.squeeze().tolist())
            self.assertSequenceEqual(decoding, expected_decoding)

            # batched
            questions = ["How old is he?", "what's the time"]
            input_processor = processor(
                images, questions, padding="max_length", max_length=20, truncation=True, return_tensors="pt"
            )

            # verify keys
            expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"]
            actual_keys = sorted(input_processor.keys())
            self.assertListEqual(actual_keys, expected_keys)

            # verify input_ids
            # this was obtained with Tesseract 4.1.1
            expected_decoding = "[CLS] what's the time [SEP] 7 itc limited report and accounts 2013 itc ’ s [SEP]"
            decoding = processor.decode(input_processor.input_ids[1].tolist())
            self.assertSequenceEqual(decoding, expected_decoding)

            # verify bbox
            expected_bbox = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [0, 45, 67, 80], [72, 56, 109, 67], [72, 56, 109, 67], [116, 56, 189, 67], [198, 59, 253, 66], [257, 59, 285, 66], [289, 59, 365, 66], [372, 59, 407, 66], [74, 136, 161, 158], [74, 136, 161, 158], [74, 136, 161, 158], [74, 136, 161, 158], [1000, 1000, 1000, 1000]] # fmt: skip
            self.assertListEqual(input_processor.bbox[1].tolist(), expected_bbox)

    @slow
    def test_processor_case_5(self):
        # case 5: visual question answering (inference), apply_ocr=False

        image_processor = LayoutLMv2ImageProcessor(apply_ocr=False)
        tokenizers = self.get_tokenizers
        images = self.get_images

        for tokenizer in tokenizers:
            processor = LayoutLMv2Processor(image_processor=image_processor, tokenizer=tokenizer)

            # not batched
            question = "What's his name?"
            words = ["hello", "world"]
            boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]
            input_processor = processor(images[0], question, words, boxes, return_tensors="pt")

            # verify keys
            expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"]
            actual_keys = sorted(input_processor.keys())
            self.assertListEqual(actual_keys, expected_keys)

            # verify input_ids
            expected_decoding = "[CLS] what's his name? [SEP] hello world [SEP]"
            decoding = processor.decode(input_processor.input_ids.squeeze().tolist())
            self.assertSequenceEqual(decoding, expected_decoding)

            # batched
            questions = ["How old is he?", "what's the time"]
            words = [["hello", "world"], ["my", "name", "is", "niels"]]
            boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3]]]
            input_processor = processor(images, questions, words, boxes, padding=True, return_tensors="pt")

            # verify keys
            expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"]
            actual_keys = sorted(input_processor.keys())
            self.assertListEqual(actual_keys, expected_keys)

            # verify input_ids
            expected_decoding = "[CLS] how old is he? [SEP] hello world [SEP] [PAD] [PAD] [PAD]"
            decoding = processor.decode(input_processor.input_ids[0].tolist())
            self.assertSequenceEqual(decoding, expected_decoding)

            expected_decoding = "[CLS] what's the time [SEP] my name is niels [SEP]"
            decoding = processor.decode(input_processor.input_ids[1].tolist())
            self.assertSequenceEqual(decoding, expected_decoding)

            # verify bbox
            expected_bbox = [[6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3], [1, 1, 2, 3], [1000, 1000, 1000, 1000]]
            self.assertListEqual(input_processor.bbox[1].tolist()[-5:], expected_bbox)
2460
transformers/tests/models/layoutlmv2/test_tokenization_layoutlmv2.py
Normal file
File diff suppressed because it is too large