Commit e27e3f16bb (parent c8feb4deb5)
2025-10-09 16:47:16 +08:00
5248 changed files with 1,778,505 additions and 0 deletions


@@ -0,0 +1,605 @@
# Copyright 2022 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers.image_utils import ChannelDimension
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available
from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import Mask2FormerImageProcessor
from transformers.models.mask2former.image_processing_mask2former import binary_mask_to_rle
from transformers.models.mask2former.modeling_mask2former import Mask2FormerForUniversalSegmentationOutput
if is_torchvision_available():
from transformers import Mask2FormerImageProcessorFast
if is_vision_available():
from PIL import Image
class Mask2FormerImageProcessingTester:
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
min_resolution=30,
max_resolution=400,
size=None,
do_resize=True,
do_normalize=True,
image_mean=[0.5, 0.5, 0.5],
image_std=[0.5, 0.5, 0.5],
num_labels=10,
do_reduce_labels=True,
ignore_index=255,
pad_size=None,
):
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.size_divisor = 0
self.pad_size = pad_size
# for the post_process_functions
self.batch_size = 2
self.num_queries = 3
self.num_classes = 2
self.height = 3
self.width = 4
self.num_labels = num_labels
self.do_reduce_labels = do_reduce_labels
self.ignore_index = ignore_index
def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"size_divisor": self.size_divisor,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"pad_size": self.pad_size,
}
def get_expected_values(self, image_inputs, batched=False):
"""
This function computes the expected height and width when providing images to Mask2FormerImageProcessor,
        assuming do_resize is set to True and size specifies a "shortest_edge".
"""
if not batched:
image = image_inputs[0]
if isinstance(image, Image.Image):
w, h = image.size
elif isinstance(image, np.ndarray):
h, w = image.shape[0], image.shape[1]
else:
h, w = image.shape[1], image.shape[2]
if w < h:
expected_height = int(self.size["shortest_edge"] * h / w)
expected_width = self.size["shortest_edge"]
elif w > h:
expected_height = self.size["shortest_edge"]
expected_width = int(self.size["shortest_edge"] * w / h)
else:
expected_height = self.size["shortest_edge"]
expected_width = self.size["shortest_edge"]
else:
expected_values = []
for image in image_inputs:
expected_height, expected_width = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
expected_height = max(expected_values, key=lambda item: item[0])[0]
expected_width = max(expected_values, key=lambda item: item[1])[1]
return expected_height, expected_width
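    # Worked example of the shortest-edge rule above (illustrative numbers): a
    # 30 x 60 (h x w) image with size={"shortest_edge": 32} scales its shorter side
    # to 32 and the longer side proportionally, giving (32, int(32 * 60 / 30)) = (32, 64).
    # In the batched case, the expected height and width are per-axis maxima.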
def get_fake_mask2former_outputs(self):
return Mask2FormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1)),
masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width)),
)
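    # Note: the real model emits masks_queries_logits at 1/4 of the input resolution
    # (see the modeling tests); this fake output simply uses the tester's tiny
    # (height, width) = (3, 4), which is all the post-processing tests need.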
def expected_output_image_shape(self, images):
height, width = self.get_expected_values(images, batched=True)
return self.num_channels, height, width
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
# Copied from transformers.tests.models.beit.test_image_processing_beit.prepare_semantic_single_inputs
def prepare_semantic_single_inputs():
ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
example = ds[0]
return example["image"], example["map"]
# Copied from transformers.tests.models.beit.test_image_processing_beit.prepare_semantic_batch_inputs
def prepare_semantic_batch_inputs():
ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
return list(ds["image"][:2]), list(ds["map"][:2])
@require_torch
@require_vision
class Mask2FormerImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = Mask2FormerImageProcessor if (is_vision_available() and is_torch_available()) else None
fast_image_processing_class = (
Mask2FormerImageProcessorFast if (is_vision_available() and is_torchvision_available()) else None
)
def setUp(self):
super().setUp()
self.image_processor_tester = Mask2FormerImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "image_mean"))
self.assertTrue(hasattr(image_processing, "image_std"))
self.assertTrue(hasattr(image_processing, "do_normalize"))
self.assertTrue(hasattr(image_processing, "do_resize"))
self.assertTrue(hasattr(image_processing, "size"))
self.assertTrue(hasattr(image_processing, "ignore_index"))
self.assertTrue(hasattr(image_processing, "num_labels"))
def comm_get_image_processing_inputs(
self,
image_processor_tester,
with_segmentation_maps=False,
is_instance_map=False,
segmentation_type="np",
numpify=False,
input_data_format=None,
):
image_processing = self.image_processing_class(**image_processor_tester.prepare_image_processor_dict())
# prepare image and target
num_labels = image_processor_tester.num_labels
annotations = None
instance_id_to_semantic_id = None
image_inputs = image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=numpify)
if with_segmentation_maps:
high = num_labels
if is_instance_map:
labels_expanded = list(range(num_labels)) * 2
instance_id_to_semantic_id = dict(enumerate(labels_expanded))
annotations = [
np.random.randint(0, high * 2, img.shape[:2] if numpify else (img.size[1], img.size[0])).astype(
np.uint8
)
for img in image_inputs
]
if segmentation_type == "pil":
annotations = [Image.fromarray(annotation) for annotation in annotations]
if input_data_format is ChannelDimension.FIRST and numpify:
image_inputs = [np.moveaxis(img, -1, 0) for img in image_inputs]
inputs = image_processing(
image_inputs,
annotations,
return_tensors="pt",
instance_id_to_semantic_id=instance_id_to_semantic_id,
pad_and_return_pixel_mask=True,
input_data_format=input_data_format,
)
return inputs
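    # With is_instance_map=True above, instance ids 0..2*num_labels-1 are mapped onto
    # the num_labels semantic classes twice over (instance id k -> class k % num_labels),
    # matching the [0, 2*num_labels) range the fake annotations are sampled from.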
def test_with_size_divisor(self):
size_divisors = [8, 16, 32]
weird_input_sizes = [(407, 802), (582, 1094)]
for image_processing_class in self.image_processor_list:
for size_divisor in size_divisors:
image_processor_dict = {**self.image_processor_dict, **{"size_divisor": size_divisor}}
image_processing = image_processing_class(**image_processor_dict)
for weird_input_size in weird_input_sizes:
inputs = image_processing([np.ones((3, *weird_input_size))], return_tensors="pt")
pixel_values = inputs["pixel_values"]
# check if divisible
self.assertTrue((pixel_values.shape[-1] % size_divisor) == 0)
self.assertTrue((pixel_values.shape[-2] % size_divisor) == 0)
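    # After the shortest-edge resize, the processor makes both spatial dims multiples
    # of size_divisor (whether by rounding or padding is an implementation detail);
    # the assertions above only rely on the divisibility of the final shape.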
def test_call_with_segmentation_maps(self):
def common(
is_instance_map=False,
segmentation_type=None,
numpify=False,
num_channels=3,
input_data_format=None,
do_resize=True,
):
image_processor_tester = Mask2FormerImageProcessingTester(
self,
num_channels=num_channels,
do_resize=do_resize,
image_mean=[0.5] * num_channels,
image_std=[0.5] * num_channels,
)
inputs = self.comm_get_image_processing_inputs(
image_processor_tester=image_processor_tester,
with_segmentation_maps=True,
is_instance_map=is_instance_map,
segmentation_type=segmentation_type,
numpify=numpify,
input_data_format=input_data_format,
)
mask_labels = inputs["mask_labels"]
class_labels = inputs["class_labels"]
pixel_values = inputs["pixel_values"]
            # check that each sample has as many mask labels as class labels
for mask_label, class_label in zip(mask_labels, class_labels):
self.assertEqual(mask_label.shape[0], class_label.shape[0])
                # this ensures that padding has happened
self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
common()
common(is_instance_map=True)
common(is_instance_map=False, segmentation_type="pil")
common(is_instance_map=True, segmentation_type="pil")
common(num_channels=1, numpify=True)
common(num_channels=1, numpify=True, input_data_format=ChannelDimension.FIRST)
common(num_channels=2, numpify=True, input_data_format=ChannelDimension.LAST)
common(num_channels=5, numpify=True, input_data_format=ChannelDimension.LAST, do_resize=False)
common(num_channels=5, numpify=True, input_data_format=ChannelDimension.FIRST, do_resize=False)
with self.assertRaisesRegex(ValueError, expected_regex="Unable to infer channel dimension format"):
common(num_channels=5, numpify=True, do_resize=False)
with self.assertRaisesRegex(TypeError, expected_regex=r"Cannot handle this data type: .*"):
common(num_channels=5, numpify=True, input_data_format=ChannelDimension.LAST)
def test_integration_instance_segmentation(self):
# load 2 images and corresponding annotations from the hub
repo_id = "nielsr/image-segmentation-toy-data"
image1 = Image.open(
hf_hub_download(repo_id=repo_id, filename="instance_segmentation_image_1.png", repo_type="dataset")
)
image2 = Image.open(
hf_hub_download(repo_id=repo_id, filename="instance_segmentation_image_2.png", repo_type="dataset")
)
annotation1 = Image.open(
hf_hub_download(repo_id=repo_id, filename="instance_segmentation_annotation_1.png", repo_type="dataset")
)
annotation2 = Image.open(
hf_hub_download(repo_id=repo_id, filename="instance_segmentation_annotation_2.png", repo_type="dataset")
)
        # get instance segmentations and instance-to-semantic mappings
def get_instance_segmentation_and_mapping(annotation):
instance_seg = np.array(annotation)[:, :, 1]
class_id_map = np.array(annotation)[:, :, 0]
class_labels = np.unique(class_id_map)
# create mapping between instance IDs and semantic category IDs
inst2class = {}
for label in class_labels:
instance_ids = np.unique(instance_seg[class_id_map == label])
inst2class.update(dict.fromkeys(instance_ids, label))
return instance_seg, inst2class
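        # In this toy dataset, each annotation PNG packs two maps into its color
        # channels: channel 0 holds the per-pixel semantic category id and channel 1
        # the instance id, hence the channel slicing in the helper above.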
instance_seg1, inst2class1 = get_instance_segmentation_and_mapping(annotation1)
instance_seg2, inst2class2 = get_instance_segmentation_and_mapping(annotation2)
        # create an image processor
image_processing = Mask2FormerImageProcessor(do_reduce_labels=True, ignore_index=255, size=(512, 512))
# prepare the images and annotations
inputs = image_processing(
[image1, image2],
[instance_seg1, instance_seg2],
instance_id_to_semantic_id=[inst2class1, inst2class2],
return_tensors="pt",
)
# verify the pixel values and pixel mask
self.assertEqual(inputs["pixel_values"].shape, (2, 3, 512, 512))
self.assertEqual(inputs["pixel_mask"].shape, (2, 512, 512))
# verify the class labels
self.assertEqual(len(inputs["class_labels"]), 2)
torch.testing.assert_close(inputs["class_labels"][0], torch.tensor([30, 55]))
torch.testing.assert_close(inputs["class_labels"][1], torch.tensor([4, 4, 23, 55]))
# verify the mask labels
self.assertEqual(len(inputs["mask_labels"]), 2)
self.assertEqual(inputs["mask_labels"][0].shape, (2, 512, 512))
self.assertEqual(inputs["mask_labels"][1].shape, (4, 512, 512))
self.assertEqual(inputs["mask_labels"][0].sum().item(), 41527.0)
self.assertEqual(inputs["mask_labels"][1].sum().item(), 26259.0)
def test_integration_semantic_segmentation(self):
# load 2 images and corresponding semantic annotations from the hub
repo_id = "nielsr/image-segmentation-toy-data"
image1 = Image.open(
hf_hub_download(repo_id=repo_id, filename="semantic_segmentation_image_1.png", repo_type="dataset")
)
image2 = Image.open(
hf_hub_download(repo_id=repo_id, filename="semantic_segmentation_image_2.png", repo_type="dataset")
)
annotation1 = Image.open(
hf_hub_download(repo_id=repo_id, filename="semantic_segmentation_annotation_1.png", repo_type="dataset")
)
annotation2 = Image.open(
hf_hub_download(repo_id=repo_id, filename="semantic_segmentation_annotation_2.png", repo_type="dataset")
)
        # create an image processor
image_processing = Mask2FormerImageProcessor(do_reduce_labels=True, ignore_index=255, size=(512, 512))
# prepare the images and annotations
inputs = image_processing(
[image1, image2],
[annotation1, annotation2],
return_tensors="pt",
)
# verify the pixel values and pixel mask
self.assertEqual(inputs["pixel_values"].shape, (2, 3, 512, 512))
self.assertEqual(inputs["pixel_mask"].shape, (2, 512, 512))
# verify the class labels
self.assertEqual(len(inputs["class_labels"]), 2)
torch.testing.assert_close(inputs["class_labels"][0], torch.tensor([2, 4, 60]))
torch.testing.assert_close(inputs["class_labels"][1], torch.tensor([0, 3, 7, 8, 15, 28, 30, 143]))
# verify the mask labels
self.assertEqual(len(inputs["mask_labels"]), 2)
self.assertEqual(inputs["mask_labels"][0].shape, (3, 512, 512))
self.assertEqual(inputs["mask_labels"][1].shape, (8, 512, 512))
self.assertEqual(inputs["mask_labels"][0].sum().item(), 170200.0)
self.assertEqual(inputs["mask_labels"][1].sum().item(), 257036.0)
def test_integration_panoptic_segmentation(self):
# load 2 images and corresponding panoptic annotations from the hub
dataset = load_dataset("nielsr/ade20k-panoptic-demo")
image1 = dataset["train"][0]["image"]
image2 = dataset["train"][1]["image"]
segments_info1 = dataset["train"][0]["segments_info"]
segments_info2 = dataset["train"][1]["segments_info"]
annotation1 = dataset["train"][0]["label"]
annotation2 = dataset["train"][1]["label"]
def rgb_to_id(color):
if isinstance(color, np.ndarray) and len(color.shape) == 3:
if color.dtype == np.uint8:
color = color.astype(np.int32)
return color[:, :, 0] + 256 * color[:, :, 1] + 256 * 256 * color[:, :, 2]
return int(color[0] + 256 * color[1] + 256 * 256 * color[2])
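        # COCO-style panoptic encoding: a segment id is packed into RGB as
        # id = R + 256 * G + 256**2 * B, which rgb_to_id inverts per pixel.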
def create_panoptic_map(annotation, segments_info):
annotation = np.array(annotation)
# convert RGB to segment IDs per pixel
# 0 is the "ignore" label, for which we don't need to make binary masks
panoptic_map = rgb_to_id(annotation)
# create mapping between segment IDs and semantic classes
inst2class = {segment["id"]: segment["category_id"] for segment in segments_info}
return panoptic_map, inst2class
panoptic_map1, inst2class1 = create_panoptic_map(annotation1, segments_info1)
panoptic_map2, inst2class2 = create_panoptic_map(annotation2, segments_info2)
        # create an image processor
image_processing = Mask2FormerImageProcessor(ignore_index=0, do_resize=False)
# prepare the images and annotations
pixel_values_list = [np.moveaxis(np.array(image1), -1, 0), np.moveaxis(np.array(image2), -1, 0)]
inputs = image_processing.encode_inputs(
pixel_values_list,
[panoptic_map1, panoptic_map2],
instance_id_to_semantic_id=[inst2class1, inst2class2],
return_tensors="pt",
)
# verify the pixel values and pixel mask
self.assertEqual(inputs["pixel_values"].shape, (2, 3, 512, 711))
self.assertEqual(inputs["pixel_mask"].shape, (2, 512, 711))
# verify the class labels
self.assertEqual(len(inputs["class_labels"]), 2)
expected_class_labels = torch.tensor([4, 17, 32, 42, 42, 42, 42, 42, 42, 42, 32, 12, 12, 12, 12, 12, 42, 42, 12, 12, 12, 42, 12, 12, 12, 12, 12, 3, 12, 12, 12, 12, 42, 42, 42, 12, 42, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 5, 12, 12, 12, 12, 12, 12, 12, 0, 43, 43, 43, 96, 43, 104, 43, 31, 125, 31, 125, 138, 87, 125, 149, 138, 125, 87, 87]) # fmt: skip
torch.testing.assert_close(inputs["class_labels"][0], torch.tensor(expected_class_labels))
expected_class_labels = torch.tensor([19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 67, 82, 19, 19, 17, 19, 19, 19, 19, 19, 19, 19, 19, 19, 12, 12, 42, 12, 12, 12, 12, 3, 14, 12, 12, 12, 12, 12, 12, 12, 12, 14, 5, 12, 12, 0, 115, 43, 43, 115, 43, 43, 43, 8, 8, 8, 138, 138, 125, 143]) # fmt: skip
torch.testing.assert_close(inputs["class_labels"][1], expected_class_labels)
# verify the mask labels
self.assertEqual(len(inputs["mask_labels"]), 2)
self.assertEqual(inputs["mask_labels"][0].shape, (79, 512, 711))
self.assertEqual(inputs["mask_labels"][1].shape, (61, 512, 711))
self.assertEqual(inputs["mask_labels"][0].sum().item(), 315193.0)
self.assertEqual(inputs["mask_labels"][1].sum().item(), 350747.0)
def test_binary_mask_to_rle(self):
fake_binary_mask = np.zeros((20, 50))
fake_binary_mask[0, 20:] = 1
fake_binary_mask[1, :15] = 1
fake_binary_mask[5, :10] = 1
rle = binary_mask_to_rle(fake_binary_mask)
self.assertEqual(len(rle), 4)
self.assertEqual(rle[0], 21)
self.assertEqual(rle[1], 45)
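        # The RLE is a flat list of (1-indexed start, run length) pairs over the
        # row-major flattened mask: the 45 ones spanning rows 0-1 start at pixel 21,
        # and the 10 ones in row 5 start at pixel 251, so the full encoding here
        # should be [21, 45, 251, 10].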
def test_post_process_semantic_segmentation(self):
for image_processing_class in self.image_processor_list:
            image_processor = image_processing_class(num_labels=self.image_processor_tester.num_classes)
            outputs = self.image_processor_tester.get_fake_mask2former_outputs()
            segmentation = image_processor.post_process_semantic_segmentation(outputs)
            self.assertEqual(len(segmentation), self.image_processor_tester.batch_size)
            self.assertEqual(segmentation[0].shape, (384, 384))
            target_sizes = [(1, 4) for _ in range(self.image_processor_tester.batch_size)]
            segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)
            self.assertEqual(segmentation[0].shape, target_sizes[0])
def test_post_process_instance_segmentation(self):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class(num_labels=self.image_processor_tester.num_classes)
outputs = self.image_processor_tester.get_fake_mask2former_outputs()
segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)
self.assertTrue(len(segmentation) == self.image_processor_tester.batch_size)
for el in segmentation:
self.assertTrue("segmentation" in el)
self.assertTrue("segments_info" in el)
self.assertEqual(type(el["segments_info"]), list)
self.assertEqual(el["segmentation"].shape, (384, 384))
segmentation = image_processor.post_process_instance_segmentation(
outputs, threshold=0, return_binary_maps=True
)
self.assertTrue(len(segmentation) == self.image_processor_tester.batch_size)
for el in segmentation:
self.assertTrue("segmentation" in el)
self.assertTrue("segments_info" in el)
self.assertEqual(type(el["segments_info"]), list)
self.assertEqual(len(el["segmentation"].shape), 3)
self.assertEqual(el["segmentation"].shape[1:], (384, 384))
def test_post_process_panoptic_segmentation(self):
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(num_labels=self.image_processor_tester.num_classes)
outputs = self.image_processor_tester.get_fake_mask2former_outputs()
segmentation = image_processing.post_process_panoptic_segmentation(outputs, threshold=0)
self.assertTrue(len(segmentation) == self.image_processor_tester.batch_size)
for el in segmentation:
self.assertTrue("segmentation" in el)
self.assertTrue("segments_info" in el)
self.assertEqual(type(el["segments_info"]), list)
self.assertEqual(el["segmentation"].shape, (384, 384))
def test_post_process_label_fusing(self):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class(num_labels=self.image_processor_tester.num_classes)
outputs = self.image_processor_tester.get_fake_mask2former_outputs()
segmentation = image_processor.post_process_panoptic_segmentation(
outputs, threshold=0, mask_threshold=0, overlap_mask_area_threshold=0
)
unfused_segments = [el["segments_info"] for el in segmentation]
fused_segmentation = image_processor.post_process_panoptic_segmentation(
outputs, threshold=0, mask_threshold=0, overlap_mask_area_threshold=0, label_ids_to_fuse={1}
)
fused_segments = [el["segments_info"] for el in fused_segmentation]
for el_unfused, el_fused in zip(unfused_segments, fused_segments):
if len(el_unfused) == 0:
self.assertEqual(len(el_unfused), len(el_fused))
continue
# Get number of segments to be fused
fuse_targets = [1 for el in el_unfused if el["label_id"] in {1}]
num_to_fuse = 0 if len(fuse_targets) == 0 else sum(fuse_targets) - 1
# Expected number of segments after fusing
expected_num_segments = max([el["id"] for el in el_unfused]) - num_to_fuse
num_segments_fused = max([el["id"] for el in el_fused])
self.assertEqual(num_segments_fused, expected_num_segments)
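    # Label fusing merges all segments whose label id is in label_ids_to_fuse into a
    # single segment, so n fused candidates reduce the segment count by n - 1, which
    # is exactly what the expected_num_segments bookkeeping above verifies.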
def test_slow_fast_equivalence(self):
if not self.test_slow_image_processor or not self.test_fast_image_processor:
self.skipTest(reason="Skipping slow/fast equivalence test")
if self.image_processing_class is None or self.fast_image_processing_class is None:
self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
dummy_image, dummy_map = prepare_semantic_single_inputs()
image_processor_slow = self.image_processing_class(**self.image_processor_dict)
image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
image_encoding_slow = image_processor_slow(dummy_image, segmentation_maps=dummy_map, return_tensors="pt")
image_encoding_fast = image_processor_fast(dummy_image, segmentation_maps=dummy_map, return_tensors="pt")
self._assert_slow_fast_tensors_equivalence(image_encoding_slow.pixel_values, image_encoding_fast.pixel_values)
for mask_label_slow, mask_label_fast in zip(image_encoding_slow.mask_labels, image_encoding_fast.mask_labels):
self._assert_slow_fast_tensors_equivalence(mask_label_slow, mask_label_fast)
for class_label_slow, class_label_fast in zip(
image_encoding_slow.class_labels, image_encoding_fast.class_labels
):
self._assert_slow_fast_tensors_equivalence(class_label_slow.float(), class_label_fast.float())
def test_slow_fast_equivalence_batched(self):
if not self.test_slow_image_processor or not self.test_fast_image_processor:
self.skipTest(reason="Skipping slow/fast equivalence test")
if self.image_processing_class is None or self.fast_image_processing_class is None:
self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
if hasattr(self.image_processor_tester, "do_center_crop") and self.image_processor_tester.do_center_crop:
self.skipTest(
reason="Skipping as do_center_crop is True and center_crop functions are not equivalent for fast and slow processors"
)
dummy_images, dummy_maps = prepare_semantic_batch_inputs()
image_processor_slow = self.image_processing_class(**self.image_processor_dict)
image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
encoding_slow = image_processor_slow(dummy_images, segmentation_maps=dummy_maps, return_tensors="pt")
encoding_fast = image_processor_fast(dummy_images, segmentation_maps=dummy_maps, return_tensors="pt")
self._assert_slow_fast_tensors_equivalence(encoding_slow.pixel_values, encoding_fast.pixel_values)
for mask_label_slow, mask_label_fast in zip(encoding_slow.mask_labels, encoding_fast.mask_labels):
self._assert_slow_fast_tensors_equivalence(mask_label_slow, mask_label_fast)
for class_label_slow, class_label_fast in zip(encoding_slow.class_labels, encoding_fast.class_labels):
self._assert_slow_fast_tensors_equivalence(class_label_slow.float(), class_label_fast.float())


@@ -0,0 +1,604 @@
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Mask2Former model."""
import unittest
from functools import cached_property
import numpy as np
import pytest
from tests.test_modeling_common import floats_tensor
from transformers import AutoModelForImageClassification, Mask2FormerConfig, is_torch_available, is_vision_available
from transformers.pytorch_utils import is_torch_greater_or_equal_than_2_4
from transformers.testing_utils import (
Expectations,
require_timm,
require_torch,
require_torch_accelerator,
require_torch_fp16,
require_torch_multi_gpu,
require_vision,
slow,
torch_device,
)
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel
if is_vision_available():
from transformers import Mask2FormerImageProcessor
if is_vision_available():
from PIL import Image
class Mask2FormerModelTester:
def __init__(
self,
parent,
batch_size=2,
is_training=True,
use_auxiliary_loss=False,
num_queries=10,
num_channels=3,
min_size=32 * 8,
max_size=32 * 8,
num_labels=4,
hidden_dim=64,
num_attention_heads=4,
num_hidden_layers=2,
):
self.parent = parent
self.batch_size = batch_size
self.is_training = is_training
self.use_auxiliary_loss = use_auxiliary_loss
self.num_queries = num_queries
self.num_channels = num_channels
self.min_size = min_size
self.max_size = max_size
self.num_labels = num_labels
self.hidden_dim = hidden_dim
self.mask_feature_size = hidden_dim
self.num_attention_heads = num_attention_heads
self.num_hidden_layers = num_hidden_layers
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
torch_device
)
pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
mask_labels = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
).float()
class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
config = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
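    # The labels above only need plausible shapes and dtypes: mask_labels are random
    # binary float masks (one per label) and class_labels random 0/1 ids, which is
    # enough to exercise the matching-based loss.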
def get_config(self):
config = Mask2FormerConfig(
hidden_size=self.hidden_dim,
num_attention_heads=self.num_attention_heads,
num_hidden_layers=self.num_hidden_layers,
encoder_feedforward_dim=16,
dim_feedforward=32,
num_queries=self.num_queries,
num_labels=self.num_labels,
decoder_layers=2,
encoder_layers=2,
feature_size=16,
)
config.num_queries = self.num_queries
config.num_labels = self.num_labels
config.backbone_config.embed_dim = 16
config.backbone_config.depths = [1, 1, 1, 1]
config.backbone_config.hidden_size = 16
config.backbone_config.num_channels = self.num_channels
config.backbone_config.num_heads = [1, 1, 2, 2]
config.backbone = None
config.hidden_dim = self.hidden_dim
config.mask_feature_size = self.hidden_dim
config.feature_size = self.hidden_dim
return config
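    # This config is deliberately tiny (2 encoder/decoder layers, 64-dim hidden size,
    # Swin backbone depths [1, 1, 1, 1]) so the unit tests stay fast; it is not
    # representative of the released checkpoints.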
def prepare_config_and_inputs_for_common(self):
config, pixel_values, pixel_mask, _, _ = self.prepare_config_and_inputs()
inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def check_output_hidden_state(self, output, config):
encoder_hidden_states = output.encoder_hidden_states
pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertEqual(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertEqual(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertEqual(len(transformer_decoder_hidden_states), config.decoder_layers)
def create_and_check_mask2former_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
with torch.no_grad():
model = Mask2FormerModel(config=config)
model.to(torch_device)
model.eval()
output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
output = model(pixel_values, output_hidden_states=True)
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape,
(self.batch_size, self.num_queries, self.hidden_dim),
)
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
self.parent.assertTrue(output.encoder_last_hidden_state is not None)
if output_hidden_states:
self.check_output_hidden_state(output, config)
def create_and_check_mask2former_instance_segmentation_head_model(
self, config, pixel_values, pixel_mask, mask_labels, class_labels
):
model = Mask2FormerForUniversalSegmentation(config=config)
model.to(torch_device)
model.eval()
def comm_check_on_output(result):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
self.parent.assertTrue(result.encoder_last_hidden_state is not None)
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape,
(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
)
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
)
with torch.no_grad():
result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
result = model(pixel_values)
comm_check_on_output(result)
result = model(
pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
)
comm_check_on_output(result)
self.parent.assertTrue(result.loss is not None)
self.parent.assertEqual(result.loss.shape, torch.Size([]))
@require_torch
class Mask2FormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else ()
pipeline_model_mapping = {"image-feature-extraction": Mask2FormerModel} if is_torch_available() else {}
is_encoder_decoder = False
test_pruning = False
test_head_masking = False
test_missing_keys = False
test_torch_exportable = True
def setUp(self):
self.model_tester = Mask2FormerModelTester(self)
self.config_tester = ConfigTester(self, config_class=Mask2FormerConfig, has_text_modality=False)
def test_config(self):
self.config_tester.run_common_tests()
def test_mask2former_model(self):
config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_mask2former_model(config, **inputs, output_hidden_states=False)
def test_mask2former_instance_segmentation_head_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mask2former_instance_segmentation_head_model(*config_and_inputs)
@unittest.skip(reason="Mask2Former does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="Mask2Former does not have a get_input_embeddings method")
def test_model_get_set_embeddings(self):
pass
@unittest.skip(reason="Mask2Former is not a generative model")
def test_generate_without_input_ids(self):
pass
@unittest.skip(reason="Mask2Former does not use token embeddings")
def test_resize_tokens_embeddings(self):
pass
@require_torch_multi_gpu
@unittest.skip(
reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
)
def test_multi_gpu_data_parallel_forward(self):
pass
@slow
def test_model_from_pretrained(self):
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
model = Mask2FormerModel.from_pretrained(model_name)
self.assertIsNotNone(model)
def test_model_with_labels(self):
size = (self.model_tester.min_size,) * 2
inputs = {
"pixel_values": torch.randn((2, 3, *size), device=torch_device),
"mask_labels": torch.randn((2, 10, *size), device=torch_device),
"class_labels": torch.zeros(2, 10, device=torch_device).long(),
}
config = self.model_tester.get_config()
model = Mask2FormerForUniversalSegmentation(config).to(torch_device)
outputs = model(**inputs)
self.assertTrue(outputs.loss is not None)
def test_hidden_states_output(self):
config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_mask2former_model(config, **inputs, output_hidden_states=True)
def test_attention_outputs(self):
config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config).to(torch_device)
outputs = model(**inputs, output_attentions=True)
self.assertTrue(outputs.attentions is not None)
def test_training(self):
if not self.model_tester.is_training:
self.skipTest(reason="model_tester.is_training is set to False")
model_class = self.all_model_classes[1]
config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
model = model_class(config)
model.to(torch_device)
model.train()
loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
loss.backward()
def test_retain_grad_hidden_states_attentions(self):
model_class = self.all_model_classes[1]
config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
config.output_hidden_states = True
config.output_attentions = True
model = model_class(config).to(torch_device)
model.train()
outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)
encoder_hidden_states = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
attentions = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=True)
self.assertIsNotNone(encoder_hidden_states.grad)
self.assertIsNotNone(pixel_decoder_hidden_states.grad)
self.assertIsNotNone(transformer_decoder_hidden_states.grad)
self.assertIsNotNone(attentions.grad)
@require_timm
def test_backbone_selection(self):
config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
config.backbone_config = None
config.backbone_kwargs = {"out_indices": [1, 2, 3]}
config.use_pretrained_backbone = True
# Load a timm backbone
# We can't load transformer checkpoint with timm backbone, as we can't specify features_only and out_indices
config.backbone = "resnet18"
config.use_timm_backbone = True
for model_class in self.all_model_classes:
model = model_class(config).to(torch_device).eval()
if model.__class__.__name__ == "Mask2FormerModel":
self.assertEqual(model.pixel_level_module.encoder.out_indices, [1, 2, 3])
elif model.__class__.__name__ == "Mask2FormerForUniversalSegmentation":
self.assertEqual(model.model.pixel_level_module.encoder.out_indices, [1, 2, 3])
# Load a HF backbone
config.backbone = "microsoft/resnet-18"
config.use_timm_backbone = False
for model_class in self.all_model_classes:
model = model_class(config).to(torch_device).eval()
if model.__class__.__name__ == "Mask2FormerModel":
self.assertEqual(model.pixel_level_module.encoder.out_indices, [1, 2, 3])
elif model.__class__.__name__ == "Mask2FormerForUniversalSegmentation":
self.assertEqual(model.model.pixel_level_module.encoder.out_indices, [1, 2, 3])
def test_initialization(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
configs_no_init = _config_zero_init(config)
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
if param.requires_grad:
if (
"self_attn.sampling_offsets.bias" in name
or "self_attn.value_proj.weight" in name
or "self_attn.output_proj.weight" in name
):
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),
[0.0, 1.0],
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
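    # _config_zero_init zeroes out the initializer ranges, so any properly initialized
    # weight should end up with mean 0.0 (or exactly 1.0 for scale-style parameters);
    # the deformable-attention projections are excluded above because they receive a
    # custom, deliberately non-zero initialization.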
def test_initialization_pretrained_backbone(self):
backbone_name = "microsoft/resnet-18"
# load Mask2Former config with a pretrained backbone
config = Mask2FormerConfig(
backbone=backbone_name,
use_pretrained_backbone=True,
)
# load pretrained backbone
backbone_model = AutoModelForImageClassification.from_pretrained(backbone_name, device_map=torch_device)
def params_match(params1, params2):
return all((p1 == p2).all() for p1, p2 in zip(params1, params2))
for model_class in self.all_model_classes:
model = model_class(config).to(torch_device).eval()
if model.__class__.__name__ == "Mask2FormerModel":
self.assertTrue(
params_match(
backbone_model.base_model.encoder.parameters(),
model.pixel_level_module.encoder.encoder.parameters(),
)
)
elif model.__class__.__name__ == "Mask2FormerForUniversalSegmentation":
self.assertTrue(
params_match(
backbone_model.base_model.encoder.parameters(),
model.model.pixel_level_module.encoder.encoder.parameters(),
)
)
TOLERANCE = 2e-4
# We will verify our results on an image of cute cats
def prepare_img():
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_vision
@slow
class Mask2FormerModelIntegrationTest(unittest.TestCase):
@cached_property
def model_checkpoints(self):
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def default_image_processor(self):
return Mask2FormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None
def test_inference_no_head(self):
model = Mask2FormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(image, return_tensors="pt").to(torch_device)
inputs_shape = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(inputs_shape, (1, 3, 384, 384))
with torch.no_grad():
outputs = model(**inputs)
expected_slice_hidden_state = torch.tensor(
[
[-0.2790, -1.0717, -1.1668],
[-0.5128, -0.3128, -0.4987],
[-0.5832, 0.1971, -0.0197],
]
).to(torch_device)
torch.testing.assert_close(
outputs.encoder_last_hidden_state[0, 0, :3, :3],
expected_slice_hidden_state,
atol=TOLERANCE,
rtol=TOLERANCE,
)
expectations = Expectations(
{
(None, None): [
[0.8973, 1.1847, 1.1776],
[1.1934, 1.5040, 1.5128],
[1.1153, 1.4486, 1.4951],
],
("cuda", 8): [
[0.8974, 1.1848, 1.1777],
[1.1933, 1.5041, 1.5128],
[1.1154, 1.4487, 1.4950],
],
}
)
expected_slice_hidden_state = torch.tensor(expectations.get_expectation()).to(torch_device)
torch.testing.assert_close(outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE,rtol=TOLERANCE) # fmt: skip
expectations = Expectations(
{
(None, None): [
[2.1152, 1.7000, -0.8603],
[1.5808, 1.8004, -0.9353],
[1.6043, 1.7495, -0.5999],
],
("cuda", 8): [
[2.1153, 1.7004, -0.8604],
[1.5807, 1.8007, -0.9354],
[1.6040, 1.7498, -0.6001],
],
}
)
expected_slice_hidden_state = torch.tensor(expectations.get_expectation()).to(torch_device)
torch.testing.assert_close(outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE, rtol=TOLERANCE) # fmt: skip
def test_inference_universal_segmentation_head(self):
model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(image, return_tensors="pt").to(torch_device)
inputs_shape = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(inputs_shape, (1, 3, 384, 384))
with torch.no_grad():
outputs = model(**inputs)
# masks_queries_logits
masks_queries_logits = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
)
expectations = Expectations(
{
(None, None): [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
],
("cuda", 8): [
[-8.7839, -9.0056, -8.8122],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3428, -6.4675],
],
}
)
expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device)
torch.testing.assert_close(masks_queries_logits[0, 0, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE)
# class_queries_logits
class_queries_logits = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
expectations = Expectations(
{
(None, None): [
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
],
("cuda", 8): [
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
],
}
)
expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device)
torch.testing.assert_close(
outputs.class_queries_logits[0, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE
)
@require_torch_accelerator
@require_torch_fp16
def test_inference_fp16(self):
model = (
Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints)
.to(torch_device, dtype=torch.float16)
.eval()
)
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(image, return_tensors="pt").to(torch_device, dtype=torch.float16)
with torch.no_grad():
_ = model(**inputs)
def test_with_segmentation_maps_and_loss(self):
model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
image_processor = self.default_image_processor
inputs = image_processor(
[np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
return_tensors="pt",
)
inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]
with torch.no_grad():
outputs = model(**inputs)
self.assertTrue(outputs.loss is not None)
@pytest.mark.torch_export_test
def test_export(self):
if not is_torch_greater_or_equal_than_2_4:
self.skipTest(reason="This test requires torch >= 2.4 to run.")
model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(image, return_tensors="pt").to(torch_device)
exported_program = torch.export.export(
model,
args=(inputs["pixel_values"], inputs["pixel_mask"]),
strict=True,
)
with torch.no_grad():
eager_outputs = model(**inputs)
exported_outputs = exported_program.module().forward(inputs["pixel_values"], inputs["pixel_mask"])
self.assertEqual(eager_outputs.masks_queries_logits.shape, exported_outputs.masks_queries_logits.shape)
torch.testing.assert_close(
eager_outputs.masks_queries_logits, exported_outputs.masks_queries_logits, rtol=TOLERANCE, atol=TOLERANCE
)
self.assertEqual(eager_outputs.class_queries_logits.shape, exported_outputs.class_queries_logits.shape)
torch.testing.assert_close(
eager_outputs.class_queries_logits, exported_outputs.class_queries_logits, rtol=TOLERANCE, atol=TOLERANCE
)