init
0
transformers/tests/models/rt_detr/__init__.py
Normal file
@@ -0,0 +1,436 @@
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import unittest

from transformers.image_utils import load_image
from transformers.testing_utils import (
    require_torch,
    require_torch_accelerator,
    require_torchvision,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available

from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
from ...test_processing_common import url_to_local_path


if is_vision_available():
    from PIL import Image

    from transformers import RTDetrImageProcessor, RTDetrImageProcessorFast

if is_torch_available():
    import torch

class RTDetrImageProcessingTester:
    def __init__(
        self,
        parent,
        batch_size=4,
        num_channels=3,
        do_resize=True,
        size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=False,
        do_pad=False,
        return_tensors="pt",
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 640, "width": 640}
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_pad = do_pad
        self.return_tensors = return_tensors

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_normalize": self.do_normalize,
            "do_pad": self.do_pad,
            "return_tensors": self.return_tensors,
        }

    def get_expected_values(self):
        return self.size["height"], self.size["width"]

    def expected_output_image_shape(self, images):
        height, width = self.get_expected_values()
        return self.num_channels, height, width

    def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        return prepare_image_inputs(
            batch_size=self.batch_size,
            num_channels=self.num_channels,
            min_resolution=30,
            max_resolution=400,
            equal_resolution=equal_resolution,
            numpify=numpify,
            torchify=torchify,
        )
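
# A minimal usage sketch of what the tester above drives (illustrative only;
# it mirrors the calls the tests below make against the real processor):
#
#     processor = RTDetrImageProcessor.from_pretrained("PekingU/rtdetr_r50vd")
#     encoding = processor(images=image, return_tensors="pt")
#     encoding["pixel_values"].shape  # torch.Size([batch, 3, 640, 640])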


@require_torch
@require_vision
class RtDetrImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
    image_processing_class = RTDetrImageProcessor if is_vision_available() else None
    fast_image_processing_class = RTDetrImageProcessorFast if is_torchvision_available() else None

    def setUp(self):
        super().setUp()
        self.image_processor_tester = RTDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        for image_processing_class in self.image_processor_list:
            image_processing = image_processing_class(**self.image_processor_dict)
            self.assertTrue(hasattr(image_processing, "do_resize"))
            self.assertTrue(hasattr(image_processing, "size"))
            self.assertTrue(hasattr(image_processing, "resample"))
            self.assertTrue(hasattr(image_processing, "do_rescale"))
            self.assertTrue(hasattr(image_processing, "rescale_factor"))
            self.assertTrue(hasattr(image_processing, "return_tensors"))

    def test_image_processor_from_dict_with_kwargs(self):
        for image_processing_class in self.image_processor_list:
            image_processor = image_processing_class.from_dict(self.image_processor_dict)
            self.assertEqual(image_processor.size, {"height": 640, "width": 640})
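
    # The raw COCO detection annotations loaded below form a dict with an integer
    # "image_id" and an "annotations" list of per-object dicts (in the standard
    # COCO layout: "bbox" in [x, y, width, height] pixels, "category_id", "area",
    # "iscrowd").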
    def test_valid_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt") as f:
            target = json.loads(f.read())

        params = {"image_id": 39769, "annotations": target}

        for image_processing_class in self.image_processor_list:
            # encode them
            image_processing = image_processing_class.from_pretrained("PekingU/rtdetr_r50vd")

            # legal encodings (single image)
            _ = image_processing(images=image, annotations=params, return_tensors="pt")
            _ = image_processing(images=image, annotations=[params], return_tensors="pt")

            # legal encodings (batch of one image)
            _ = image_processing(images=[image], annotations=params, return_tensors="pt")
            _ = image_processing(images=[image], annotations=[params], return_tensors="pt")

            # legal encoding (batch of more than one image)
            n = 5
            _ = image_processing(images=[image] * n, annotations=[params] * n, return_tensors="pt")

            # example of an illegal encoding (missing the 'image_id' key)
            with self.assertRaises(ValueError) as e:
                image_processing(images=image, annotations={"annotations": target}, return_tensors="pt")

            self.assertTrue(str(e.exception).startswith("Invalid COCO detection annotations"))

            # example of an illegal encoding (unequal lengths of images and annotations)
            with self.assertRaises(ValueError) as e:
                image_processing(images=[image] * n, annotations=[params] * (n - 1), return_tensors="pt")

            self.assertEqual(str(e.exception), "The number of images (5) and annotations (4) do not match.")

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        for image_processing_class in self.image_processor_list:
            # encode them
            image_processing = image_processing_class.from_pretrained("PekingU/rtdetr_r50vd")
            encoding = image_processing(images=image, annotations=target, return_tensors="pt")

            # verify pixel values
            expected_shape = torch.Size([1, 3, 640, 640])
            self.assertEqual(encoding["pixel_values"].shape, expected_shape)

            expected_slice = torch.tensor([0.5490, 0.5647, 0.5725])
            torch.testing.assert_close(encoding["pixel_values"][0, 0, 0, :3], expected_slice, rtol=1e-4, atol=1e-4)

            # verify area
            expected_area = torch.tensor([2827.9883, 5403.4761, 235036.7344, 402070.2188, 71068.8281, 79601.2812])
            torch.testing.assert_close(encoding["labels"][0]["area"], expected_area)
            # verify boxes
            expected_boxes_shape = torch.Size([6, 4])
            self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
            expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
            torch.testing.assert_close(encoding["labels"][0]["boxes"][0], expected_boxes_slice, rtol=1e-3, atol=1e-3)
            # verify image_id
            expected_image_id = torch.tensor([39769])
            torch.testing.assert_close(encoding["labels"][0]["image_id"], expected_image_id)
            # verify is_crowd
            expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
            torch.testing.assert_close(encoding["labels"][0]["iscrowd"], expected_is_crowd)
            # verify class_labels
            expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
            torch.testing.assert_close(encoding["labels"][0]["class_labels"], expected_class_labels)
            # verify orig_size
            expected_orig_size = torch.tensor([480, 640])
            torch.testing.assert_close(encoding["labels"][0]["orig_size"], expected_orig_size)
            # verify size
            expected_size = torch.tensor([640, 640])
            torch.testing.assert_close(encoding["labels"][0]["size"], expected_size)

    @slow
    def test_image_processor_outputs(self):
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")

        for image_processing_class in self.image_processor_list:
            image_processing = image_processing_class(**self.image_processor_dict)
            encoding = image_processing(images=image, return_tensors="pt")

            # verify pixel values: shape
            expected_shape = torch.Size([1, 3, 640, 640])
            self.assertEqual(encoding["pixel_values"].shape, expected_shape)

            # verify pixel values: output values
            expected_slice = torch.tensor([0.5490196347236633, 0.5647059082984924, 0.572549045085907])
            torch.testing.assert_close(encoding["pixel_values"][0, 0, 0, :3], expected_slice, rtol=1e-5, atol=1e-5)

    def test_multiple_images_processor_outputs(self):
        images_urls = [
            "http://images.cocodataset.org/val2017/000000000139.jpg",
            "http://images.cocodataset.org/val2017/000000000285.jpg",
            "http://images.cocodataset.org/val2017/000000000632.jpg",
            "http://images.cocodataset.org/val2017/000000000724.jpg",
            "http://images.cocodataset.org/val2017/000000000776.jpg",
            "http://images.cocodataset.org/val2017/000000000785.jpg",
            "http://images.cocodataset.org/val2017/000000000802.jpg",
            "http://images.cocodataset.org/val2017/000000000872.jpg",
        ]

        images = []
        for url in images_urls:
            image = load_image(url_to_local_path(url))
            images.append(image)

        for image_processing_class in self.image_processor_list:
            # apply image processing
            image_processing = image_processing_class(**self.image_processor_dict)
            encoding = image_processing(images=images, return_tensors="pt")

            # verify if pixel_values is part of the encoding
            self.assertIn("pixel_values", encoding)

            # verify pixel values: shape
            expected_shape = torch.Size([8, 3, 640, 640])
            self.assertEqual(encoding["pixel_values"].shape, expected_shape)

            # verify pixel values: output values
            expected_slices = torch.tensor(
                [
                    [0.5333333611488342, 0.5568627715110779, 0.5647059082984924],
                    [0.5372549295425415, 0.4705882668495178, 0.4274510145187378],
                    [0.3960784673690796, 0.35686275362968445, 0.3686274588108063],
                    [0.20784315466880798, 0.1882353127002716, 0.15294118225574493],
                    [0.364705890417099, 0.364705890417099, 0.3686274588108063],
                    [0.8078432083129883, 0.8078432083129883, 0.8078432083129883],
                    [0.4431372880935669, 0.4431372880935669, 0.4431372880935669],
                    [0.19607844948768616, 0.21176472306251526, 0.3607843220233917],
                ]
            )
            torch.testing.assert_close(encoding["pixel_values"][:, 1, 0, :3], expected_slices, rtol=1e-5, atol=1e-5)

    @slow
    def test_batched_coco_detection_annotations(self):
        image_0 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        image_1 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png").resize((800, 800))

        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt") as f:
            target = json.loads(f.read())

        annotations_0 = {"image_id": 39769, "annotations": target}
        annotations_1 = {"image_id": 39769, "annotations": target}

        # Adjust the bounding boxes for the resized image
        w_0, h_0 = image_0.size
        w_1, h_1 = image_1.size
        for i in range(len(annotations_1["annotations"])):
            coords = annotations_1["annotations"][i]["bbox"]
            new_bbox = [
                coords[0] * w_1 / w_0,
                coords[1] * h_1 / h_0,
                coords[2] * w_1 / w_0,
                coords[3] * h_1 / h_0,
            ]
            annotations_1["annotations"][i]["bbox"] = new_bbox

        images = [image_0, image_1]
        annotations = [annotations_0, annotations_1]

        for image_processing_class in self.image_processor_list:
            image_processing = image_processing_class()
            encoding = image_processing(
                images=images,
                annotations=annotations,
                return_segmentation_masks=True,
                return_tensors="pt",  # do_convert_annotations=True
            )

            # Check the pixel values have been padded
            postprocessed_height, postprocessed_width = 640, 640
            expected_shape = torch.Size([2, 3, postprocessed_height, postprocessed_width])
            self.assertEqual(encoding["pixel_values"].shape, expected_shape)

            # Check the bounding boxes have been adjusted for padded images
            self.assertEqual(encoding["labels"][0]["boxes"].shape, torch.Size([6, 4]))
            self.assertEqual(encoding["labels"][1]["boxes"].shape, torch.Size([6, 4]))
            expected_boxes_0 = torch.tensor(
                [
                    [0.6879, 0.4609, 0.0755, 0.3691],
                    [0.2118, 0.3359, 0.2601, 0.1566],
                    [0.5011, 0.5000, 0.9979, 1.0000],
                    [0.5010, 0.5020, 0.9979, 0.9959],
                    [0.3284, 0.5944, 0.5884, 0.8112],
                    [0.8394, 0.5445, 0.3213, 0.9110],
                ]
            )
            expected_boxes_1 = torch.tensor(
                [
                    [0.5503, 0.2765, 0.0604, 0.2215],
                    [0.1695, 0.2016, 0.2080, 0.0940],
                    [0.5006, 0.4933, 0.9977, 0.9865],
                    [0.5008, 0.5002, 0.9983, 0.9955],
                    [0.2627, 0.5456, 0.4707, 0.8646],
                    [0.7715, 0.4115, 0.4570, 0.7161],
                ]
            )
            torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1e-3, rtol=1e-3)
            torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1e-3, rtol=1e-3)

            # Check that if do_convert_annotations=False, the annotations are not converted to the
            # centre_x, centre_y, width, height format and are not normalized to the range [0, 1]
            encoding = image_processing(
                images=images,
                annotations=annotations,
                return_segmentation_masks=True,
                do_convert_annotations=False,
                return_tensors="pt",
            )
            self.assertEqual(encoding["labels"][0]["boxes"].shape, torch.Size([6, 4]))
            self.assertEqual(encoding["labels"][1]["boxes"].shape, torch.Size([6, 4]))
            # Convert to absolute coordinates
            unnormalized_boxes_0 = torch.vstack(
                [
                    expected_boxes_0[:, 0] * postprocessed_width,
                    expected_boxes_0[:, 1] * postprocessed_height,
                    expected_boxes_0[:, 2] * postprocessed_width,
                    expected_boxes_0[:, 3] * postprocessed_height,
                ]
            ).T
            unnormalized_boxes_1 = torch.vstack(
                [
                    expected_boxes_1[:, 0] * postprocessed_width,
                    expected_boxes_1[:, 1] * postprocessed_height,
                    expected_boxes_1[:, 2] * postprocessed_width,
                    expected_boxes_1[:, 3] * postprocessed_height,
                ]
            ).T
            # Convert from centre_x, centre_y, width, height to x_min, y_min, x_max, y_max
            expected_boxes_0 = torch.vstack(
                [
                    unnormalized_boxes_0[:, 0] - unnormalized_boxes_0[:, 2] / 2,
                    unnormalized_boxes_0[:, 1] - unnormalized_boxes_0[:, 3] / 2,
                    unnormalized_boxes_0[:, 0] + unnormalized_boxes_0[:, 2] / 2,
                    unnormalized_boxes_0[:, 1] + unnormalized_boxes_0[:, 3] / 2,
                ]
            ).T
            expected_boxes_1 = torch.vstack(
                [
                    unnormalized_boxes_1[:, 0] - unnormalized_boxes_1[:, 2] / 2,
                    unnormalized_boxes_1[:, 1] - unnormalized_boxes_1[:, 3] / 2,
                    unnormalized_boxes_1[:, 0] + unnormalized_boxes_1[:, 2] / 2,
                    unnormalized_boxes_1[:, 1] + unnormalized_boxes_1[:, 3] / 2,
                ]
            ).T
            torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1, rtol=1)
            torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1, rtol=1)
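            # Worked example for the checks above (approximate): the first box of
            # image 0 is normalized (cx, cy, w, h) = (0.6879, 0.4609, 0.0755, 0.3691);
            # on the 640x640 padded output that is cx = 440.3 and w = 48.3 pixels, so
            # the unconverted corners are x_min = cx - w / 2 = 416.1 and
            # x_max = cx + w / 2 = 464.4.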

    @slow
    @require_torch_accelerator
    @require_torchvision
    # Copied from tests.models.detr.test_image_processing_detr.DetrImageProcessingTest.test_fast_processor_equivalence_cpu_accelerator_coco_detection_annotations
    def test_fast_processor_equivalence_cpu_accelerator_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        processor = self.image_processor_list[1]()
        # 1. run processor on CPU
        encoding_cpu = processor(images=image, annotations=target, return_tensors="pt", device="cpu")
        # 2. run processor on accelerator
        encoding_gpu = processor(images=image, annotations=target, return_tensors="pt", device=torch_device)

        # verify pixel values
        self.assertEqual(encoding_cpu["pixel_values"].shape, encoding_gpu["pixel_values"].shape)
        self.assertTrue(
            torch.allclose(
                encoding_cpu["pixel_values"][0, 0, 0, :3],
                encoding_gpu["pixel_values"][0, 0, 0, :3].to("cpu"),
                atol=1e-4,
            )
        )
        # verify area
        torch.testing.assert_close(encoding_cpu["labels"][0]["area"], encoding_gpu["labels"][0]["area"].to("cpu"))
        # verify boxes
        self.assertEqual(encoding_cpu["labels"][0]["boxes"].shape, encoding_gpu["labels"][0]["boxes"].shape)
        self.assertTrue(
            torch.allclose(
                encoding_cpu["labels"][0]["boxes"][0], encoding_gpu["labels"][0]["boxes"][0].to("cpu"), atol=1e-3
            )
        )
        # verify image_id
        torch.testing.assert_close(
            encoding_cpu["labels"][0]["image_id"], encoding_gpu["labels"][0]["image_id"].to("cpu")
        )
        # verify is_crowd
        torch.testing.assert_close(
            encoding_cpu["labels"][0]["iscrowd"], encoding_gpu["labels"][0]["iscrowd"].to("cpu")
        )
        # verify class_labels
        self.assertTrue(
            torch.allclose(
                encoding_cpu["labels"][0]["class_labels"], encoding_gpu["labels"][0]["class_labels"].to("cpu")
            )
        )
        # verify orig_size
        torch.testing.assert_close(
            encoding_cpu["labels"][0]["orig_size"], encoding_gpu["labels"][0]["orig_size"].to("cpu")
        )
        # verify size
        torch.testing.assert_close(encoding_cpu["labels"][0]["size"], encoding_gpu["labels"][0]["size"].to("cpu"))
801
transformers/tests/models/rt_detr/test_modeling_rt_detr.py
Normal file
@@ -0,0 +1,801 @@
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch RT_DETR model."""

import inspect
import math
import tempfile
import unittest
from functools import cached_property

from parameterized import parameterized

from transformers import (
    RTDetrConfig,
    RTDetrImageProcessor,
    RTDetrResNetConfig,
    is_torch_available,
    is_vision_available,
)
from transformers.testing_utils import (
    Expectations,
    require_torch,
    require_torch_accelerator,
    require_vision,
    slow,
    torch_device,
)

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import RTDetrForObjectDetection, RTDetrModel

if is_vision_available():
    from PIL import Image


CHECKPOINT = "PekingU/rtdetr_r50vd"  # TODO: replace


class RTDetrModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        is_training=True,
        use_labels=True,
        n_targets=3,
        num_labels=10,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        batch_norm_eps=1e-5,
        # backbone
        backbone_config=None,
        # encoder HybridEncoder
        encoder_hidden_dim=32,
        encoder_in_channels=[128, 256, 512],
        feat_strides=[8, 16, 32],
        encoder_layers=1,
        encoder_ffn_dim=64,
        encoder_attention_heads=2,
        dropout=0.0,
        activation_dropout=0.0,
        encode_proj_layers=[2],
        positional_encoding_temperature=10000,
        encoder_activation_function="gelu",
        activation_function="silu",
        eval_size=None,
        normalize_before=False,
        # decoder RTDetrTransformer
        d_model=32,
        num_queries=30,
        decoder_in_channels=[32, 32, 32],
        decoder_ffn_dim=64,
        num_feature_levels=3,
        decoder_n_points=4,
        decoder_layers=2,
        decoder_attention_heads=2,
        decoder_activation_function="relu",
        attention_dropout=0.0,
        num_denoising=0,
        label_noise_ratio=0.5,
        box_noise_scale=1.0,
        learn_initial_query=False,
        anchor_image_size=None,
        image_size=64,
        disable_custom_kernels=True,
        with_box_refine=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = 3
        self.is_training = is_training
        self.use_labels = use_labels
        self.n_targets = n_targets
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.batch_norm_eps = batch_norm_eps
        self.backbone_config = backbone_config
        self.encoder_hidden_dim = encoder_hidden_dim
        self.encoder_in_channels = encoder_in_channels
        self.feat_strides = feat_strides
        self.encoder_layers = encoder_layers
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_attention_heads = encoder_attention_heads
        self.dropout = dropout
        self.activation_dropout = activation_dropout
        self.encode_proj_layers = encode_proj_layers
        self.positional_encoding_temperature = positional_encoding_temperature
        self.encoder_activation_function = encoder_activation_function
        self.activation_function = activation_function
        self.eval_size = eval_size
        self.normalize_before = normalize_before
        self.d_model = d_model
        self.num_queries = num_queries
        self.decoder_in_channels = decoder_in_channels
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_feature_levels = num_feature_levels
        self.decoder_n_points = decoder_n_points
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_activation_function = decoder_activation_function
        self.attention_dropout = attention_dropout
        self.num_denoising = num_denoising
        self.label_noise_ratio = label_noise_ratio
        self.box_noise_scale = box_noise_scale
        self.learn_initial_query = learn_initial_query
        self.anchor_image_size = anchor_image_size
        self.image_size = image_size
        self.disable_custom_kernels = disable_custom_kernels
        self.with_box_refine = with_box_refine

        self.encoder_seq_length = math.ceil(self.image_size / 32) * math.ceil(self.image_size / 32)
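        # Assumption behind the line above: the deepest backbone feature map has
        # stride 32 (feat_strides[-1]), so the encoder sequence covers a
        # ceil(H / 32) * ceil(W / 32) grid; with image_size=64 that is 2 * 2 = 4.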

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        pixel_mask = torch.ones([self.batch_size, self.image_size, self.image_size], device=torch_device)

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()
        config.num_labels = self.num_labels
        return config, pixel_values, pixel_mask, labels

    def get_config(self):
        hidden_sizes = [10, 20, 30, 40]
        backbone_config = RTDetrResNetConfig(
            embeddings_size=10,
            hidden_sizes=hidden_sizes,
            depths=[1, 1, 2, 1],
            out_features=["stage2", "stage3", "stage4"],
            out_indices=[2, 3, 4],
        )
        return RTDetrConfig.from_backbone_configs(
            backbone_config=backbone_config,
            encoder_hidden_dim=self.encoder_hidden_dim,
            encoder_in_channels=hidden_sizes[1:],
            feat_strides=self.feat_strides,
            encoder_layers=self.encoder_layers,
            encoder_ffn_dim=self.encoder_ffn_dim,
            encoder_attention_heads=self.encoder_attention_heads,
            dropout=self.dropout,
            activation_dropout=self.activation_dropout,
            encode_proj_layers=self.encode_proj_layers,
            positional_encoding_temperature=self.positional_encoding_temperature,
            encoder_activation_function=self.encoder_activation_function,
            activation_function=self.activation_function,
            eval_size=self.eval_size,
            normalize_before=self.normalize_before,
            d_model=self.d_model,
            num_queries=self.num_queries,
            decoder_in_channels=self.decoder_in_channels,
            decoder_ffn_dim=self.decoder_ffn_dim,
            num_feature_levels=self.num_feature_levels,
            decoder_n_points=self.decoder_n_points,
            decoder_layers=self.decoder_layers,
            decoder_attention_heads=self.decoder_attention_heads,
            decoder_activation_function=self.decoder_activation_function,
            attention_dropout=self.attention_dropout,
            num_denoising=self.num_denoising,
            label_noise_ratio=self.label_noise_ratio,
            box_noise_scale=self.box_noise_scale,
            learn_initial_query=self.learn_initial_query,
            anchor_image_size=self.anchor_image_size,
            image_size=self.image_size,
            disable_custom_kernels=self.disable_custom_kernels,
            with_box_refine=self.with_box_refine,
        )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def create_and_check_rt_detr_model(self, config, pixel_values, pixel_mask, labels):
        model = RTDetrModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
        result = model(pixel_values)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.num_queries, self.d_model))

    def create_and_check_rt_detr_object_detection_head_model(self, config, pixel_values, pixel_mask, labels):
        model = RTDetrForObjectDetection(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4))

        result = model(pixel_values=pixel_values, pixel_mask=pixel_mask, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4))


@require_torch
class RTDetrModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (RTDetrModel, RTDetrForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"image-feature-extraction": RTDetrModel, "object-detection": RTDetrForObjectDetection}
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torch_exportable = True

    # special case for head models
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "RTDetrForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict

    def setUp(self):
        self.model_tester = RTDetrModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=RTDetrConfig,
            has_text_modality=False,
            common_properties=["hidden_size", "num_attention_heads"],
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_rt_detr_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_rt_detr_model(*config_and_inputs)

    def test_rt_detr_object_detection_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_rt_detr_object_detection_head_model(*config_and_inputs)

    @unittest.skip(reason="RTDetr does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RTDetr does not use test_inputs_embeds_matches_input_ids")
    def test_inputs_embeds_matches_input_ids(self):
        pass

    @unittest.skip(reason="RTDetr does not support input and output embeddings")
    def test_model_get_set_embeddings(self):
        pass

    @unittest.skip(reason="RTDetr does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="RTDetr does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class._from_config(config, attn_implementation="eager")
            config = model.config
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.encoder_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.encoder_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [
                    self.model_tester.encoder_attention_heads,
                    self.model_tester.encoder_seq_length,
                    self.model_tester.encoder_seq_length,
                ],
            )
            out_len = len(outputs)

            correct_outlen = 13

            # loss is at first position
            if "labels" in inputs_dict:
                correct_outlen += 1  # loss is added to beginning
            # Object Detection model returns pred_logits and pred_boxes
            if model_class.__name__ == "RTDetrForObjectDetection":
                correct_outlen += 2

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.decoder_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [
                    self.model_tester.decoder_attention_heads,
                    self.model_tester.num_queries,
                    self.model_tester.num_queries,
                ],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.decoder_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [
                    self.model_tester.decoder_attention_heads,
                    self.model_tester.num_feature_levels,
                    self.model_tester.decoder_n_points,
                ],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # RTDetr should maintain encoder_hidden_states output
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.encoder_attentions

            self.assertEqual(len(self_attentions), self.model_tester.encoder_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [
                    self.model_tester.encoder_attention_heads,
                    self.model_tester.encoder_seq_length,
                    self.model_tester.encoder_seq_length,
                ],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", len(self.model_tester.encoder_in_channels) - 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            self.assertListEqual(
                list(hidden_states[1].shape[-2:]),
                [
                    self.model_tester.image_size // self.model_tester.feat_strides[-1],
                    self.model_tester.image_size // self.model_tester.feat_strides[-1],
                ],
            )

            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.decoder_layers + 1
                )

                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)

                self.assertListEqual(
                    list(hidden_states[0].shape[-2:]),
                    [self.model_tester.num_queries, self.model_tester.d_model],
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)

        outputs = model(**inputs)

        # we take the first output since last_hidden_state is the first item
        output = outputs[0]

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_attentions = outputs.encoder_attentions[0]
        encoder_hidden_states.retain_grad()
        encoder_attentions.retain_grad()

        decoder_attentions = outputs.decoder_attentions[0]
        decoder_attentions.retain_grad()

        cross_attentions = outputs.cross_attentions[0]
        cross_attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(encoder_attentions.grad)
        self.assertIsNotNone(decoder_attentions.grad)
        self.assertIsNotNone(cross_attentions.grad)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_different_timm_backbone(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        # let's pick a random timm backbone
        config.backbone = "tf_mobilenetv3_small_075"
        config.backbone_config = None
        config.use_timm_backbone = True
        config.backbone_kwargs = {"out_indices": [2, 3, 4]}
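        # out_indices selects three feature maps from the timm backbone, matching
        # the three feature levels (num_feature_levels=3) the model tester
        # configures for the decoder.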

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if model_class.__name__ == "RTDetrForObjectDetection":
                expected_shape = (
                    self.model_tester.batch_size,
                    self.model_tester.num_queries,
                    self.model_tester.num_labels,
                )
                self.assertEqual(outputs.logits.shape, expected_shape)
                # Confirm out_indices was propagated to backbone
                self.assertEqual(len(model.model.backbone.intermediate_channel_sizes), 3)
            else:
                # Confirm out_indices was propagated to backbone
                self.assertEqual(len(model.backbone.intermediate_channel_sizes), 3)

            self.assertTrue(outputs)

    def test_hf_backbone(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        # Load a pretrained HF checkpoint as backbone
        config.backbone = "microsoft/resnet-18"
        config.backbone_config = None
        config.use_timm_backbone = False
        config.use_pretrained_backbone = True
        config.backbone_kwargs = {"out_indices": [2, 3, 4]}

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if model_class.__name__ == "RTDetrForObjectDetection":
                expected_shape = (
                    self.model_tester.batch_size,
                    self.model_tester.num_queries,
                    self.model_tester.num_labels,
                )
                self.assertEqual(outputs.logits.shape, expected_shape)
                # Confirm out_indices was propagated to backbone
                self.assertEqual(len(model.model.backbone.intermediate_channel_sizes), 3)
            else:
                # Confirm out_indices was propagated to backbone
                self.assertEqual(len(model.backbone.intermediate_channel_sizes), 3)

            self.assertTrue(outputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.initializer_bias_prior_prob = 0.2
        bias_value = -1.3863  # = log(0.2 / (1 - 0.2))
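        # Prior-probability initialization: choosing bias = log(p / (1 - p)) makes
        # sigmoid(bias) = p, so with p = 0.2 the classification heads start out
        # predicting the configured prior; log(0.2 / 0.8) is approximately -1.3863.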

        failed_cases = []

        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            for name, module in model.named_modules():
                if module.__class__.__name__ == "RTDetrConvEncoder":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if ("class_embed" in name and "bias" in name) or "enc_score_head.bias" in name:
                        bias_tensor = torch.full_like(param.data, bias_value)
                        if not torch.allclose(param.data, bias_tensor, atol=1e-4):
                            failed_cases.append(
                                f"Parameter {name} of model {model_class} seems not properly initialized. "
                                f"Biases should be initialized to {bias_value}, got {param.data}"
                            )
                    elif (
                        "level_embed" in name
                        or "sampling_offsets.bias" in name
                        or "value_proj" in name
                        or "output_proj" in name
                        or "reference_points" in name
                        or "enc_score_head.weight" in name
                        or ("class_embed" in name and "weight" in name)
                        or name in backbone_params
                    ):
                        continue
                    else:
                        mean = param.data.mean()
                        round_mean = (mean * 1e9).round() / 1e9
                        round_mean = round_mean.item()
                        if round_mean not in [0.0, 1.0]:
                            failed_cases.append(
                                f"Parameter {name} of model {model_class} seems not properly initialized. "
                                f"Mean is {round_mean}, but should be in [0, 1]"
                            )

        message = "\n" + "\n".join(failed_cases)
        self.assertTrue(not failed_cases, message)

    @parameterized.expand(["float32", "float16", "bfloat16"])
    @require_torch_accelerator
    @slow
    def test_inference_with_different_dtypes(self, dtype_str):
        dtype = {
            "float32": torch.float32,
            "float16": torch.float16,
            "bfloat16": torch.bfloat16,
        }[dtype_str]

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device).to(dtype)
            model.eval()
            for key, tensor in inputs_dict.items():
                if tensor.dtype == torch.float32:
                    inputs_dict[key] = tensor.to(dtype)
            with torch.no_grad():
                _ = model(**self._prepare_for_class(inputs_dict, model_class))

    @parameterized.expand(["float32", "float16", "bfloat16"])
    @require_torch_accelerator
    @slow
    def test_inference_equivalence_for_static_and_dynamic_anchors(self, dtype_str):
        dtype = {
            "float32": torch.float32,
            "float16": torch.float16,
            "bfloat16": torch.bfloat16,
        }[dtype_str]

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        h, w = inputs_dict["pixel_values"].shape[-2:]

        # convert inputs to the desired dtype
        for key, tensor in inputs_dict.items():
            if tensor.dtype == torch.float32:
                inputs_dict[key] = tensor.to(dtype)

        for model_class in self.all_model_classes:
            with tempfile.TemporaryDirectory() as tmpdirname:
                model_class(config).save_pretrained(tmpdirname)
                model_static = model_class.from_pretrained(
                    tmpdirname, anchor_image_size=[h, w], device_map=torch_device, dtype=dtype
                ).eval()
                model_dynamic = model_class.from_pretrained(
                    tmpdirname, anchor_image_size=None, device_map=torch_device, dtype=dtype
                ).eval()

            self.assertIsNotNone(model_static.config.anchor_image_size)
            self.assertIsNone(model_dynamic.config.anchor_image_size)

            with torch.no_grad():
                outputs_static = model_static(**self._prepare_for_class(inputs_dict, model_class))
                outputs_dynamic = model_dynamic(**self._prepare_for_class(inputs_dict, model_class))

            self.assertTrue(
                torch.allclose(
                    outputs_static.last_hidden_state, outputs_dynamic.last_hidden_state, rtol=1e-4, atol=1e-4
                ),
                f"Max diff: {(outputs_static.last_hidden_state - outputs_dynamic.last_hidden_state).abs().max()}",
            )

TOLERANCE = 1e-4


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
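
# The fixture above is the 640x480 COCO image with id 39769 (two cats on a
# couch), the same file the image processing tests load.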

@require_torch
@require_vision
class RTDetrModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return RTDetrImageProcessor.from_pretrained(CHECKPOINT) if is_vision_available() else None

    def test_inference_object_detection_head(self):
        model = RTDetrForObjectDetection.from_pretrained(CHECKPOINT).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape_logits = torch.Size((1, 300, model.config.num_labels))
        self.assertEqual(outputs.logits.shape, expected_shape_logits)

        expectations = Expectations(
            {
                (None, None): [
                    [-4.64763879776001, -5.001153945922852, -4.978509902954102],
                    [-4.159348487854004, -4.703853607177734, -5.946484565734863],
                    [-4.437461853027344, -4.65836238861084, -6.235235691070557],
                ],
                ("cuda", 8): [[-4.6471, -5.0008, -4.9786], [-4.1599, -4.7041, -5.9458], [-4.4374, -4.6582, -6.2340]],
            }
        )
        expected_logits = torch.tensor(expectations.get_expectation()).to(torch_device)

        expectations = Expectations(
            {
                (None, None): [
                    [0.1688060760498047, 0.19992263615131378, 0.21225441992282867],
                    [0.768376350402832, 0.41226309537887573, 0.4636859893798828],
                    [0.25953856110572815, 0.5483334064483643, 0.4777486026287079],
                ],
                ("cuda", 8): [[0.1688, 0.1999, 0.2123], [0.7684, 0.4123, 0.4637], [0.2596, 0.5483, 0.4777]],
            }
        )
        expected_boxes = torch.tensor(expectations.get_expectation()).to(torch_device)

        torch.testing.assert_close(outputs.logits[0, :3, :3], expected_logits, rtol=2e-4, atol=2e-4)

        expected_shape_boxes = torch.Size((1, 300, 4))
        self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes)
        torch.testing.assert_close(outputs.pred_boxes[0, :3, :3], expected_boxes, rtol=2e-4, atol=2e-4)

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.0, target_sizes=[image.size[::-1]]
        )[0]
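        # target_sizes expects (height, width) per image, while PIL's Image.size
        # is (width, height), hence the [::-1] above.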

        expectations = Expectations(
            {
                (None, None): [0.9703017473220825, 0.9599503874778748, 0.9575679302215576, 0.9506784677505493],
                ("cuda", 8): [0.9704, 0.9599, 0.9576, 0.9507],
            }
        )
        expected_scores = torch.tensor(expectations.get_expectation()).to(torch_device)

        expected_labels = [57, 15, 15, 65]

        expectations = Expectations(
            {
                (None, None): [
                    [0.13774872, 0.37821293, 640.13074, 476.21088],
                    [343.38132, 24.276838, 640.1404, 371.49573],
                    [13.225126, 54.179348, 318.98422, 472.2207],
                    [40.114475, 73.44104, 175.9573, 118.48469],
                ],
                ("cuda", 8): [
                    [1.3775e-01, 3.7821e-01, 6.4013e02, 4.7621e02],
                    [3.4338e02, 2.4277e01, 6.4014e02, 3.7150e02],
                    [1.3225e01, 5.4179e01, 3.1898e02, 4.7222e02],
                    [4.0114e01, 7.3441e01, 1.7596e02, 1.1848e02],
                ],
            }
        )
        expected_slice_boxes = torch.tensor(expectations.get_expectation()).to(torch_device)

        torch.testing.assert_close(results["scores"][:4], expected_scores, rtol=2e-4, atol=2e-4)
        self.assertSequenceEqual(results["labels"][:4].tolist(), expected_labels)
        torch.testing.assert_close(results["boxes"][:4], expected_slice_boxes, rtol=2e-4, atol=2e-4)
@@ -0,0 +1,100 @@
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from transformers import RTDetrResNetConfig
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_modeling_common import floats_tensor, ids_tensor


if is_torch_available():
    from transformers import RTDetrResNetBackbone

class RTDetrResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return RTDetrResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
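
# Note (assumption about the shared mixin): BackboneTesterMixin, used by the
# test class below, builds this config, instantiates RTDetrResNetBackbone, and
# checks that one feature map is returned per requested stage
# ("stage2".."stage4", indices 2..4).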

@require_torch
class RTDetrResNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (RTDetrResNetBackbone,) if is_torch_available() else ()
    has_attentions = False
    config_class = RTDetrResNetConfig

    def setUp(self):
        self.model_tester = RTDetrResNetModelTester(self)