init
0    transformers/tests/models/vivit/__init__.py                        Normal file
228  transformers/tests/models/vivit/test_image_processing_vivit.py     Normal file
@@ -0,0 +1,228 @@
# Copyright 2022 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingTestMixin, prepare_video_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import VivitImageProcessor


class VivitImageProcessingTester:
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }

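    # With the defaults above, each processed video is expected to come out as
    # (num_frames, num_channels, crop_height, crop_width) = (10, 3, 18, 18)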
    def expected_output_image_shape(self, images):
        return self.num_frames, self.num_channels, self.crop_size["height"], self.crop_size["width"]

    def prepare_video_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        return prepare_video_inputs(
            batch_size=self.batch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            numpify=numpify,
            torchify=torchify,
        )


@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        super().setUp()
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_rescale(self):
        # ViVit optionally rescales between -1 and 1 instead of the usual 0 and 1
        image = np.arange(0, 256, 1, dtype=np.uint8).reshape(1, 8, 32)

        image_processor = self.image_processing_class(**self.image_processor_dict)

        rescaled_image = image_processor.rescale(image, scale=1 / 127.5)
        expected_image = (image * (1 / 127.5)).astype(np.float32) - 1
        self.assertTrue(np.allclose(rescaled_image, expected_image))

        rescaled_image = image_processor.rescale(image, scale=1 / 255, offset=False)
        expected_image = (image / 255.0).astype(np.float32)
        self.assertTrue(np.allclose(rescaled_image, expected_image))
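        # worked example of the offset rescale above: with scale=1/127.5 and offset=True,
        # a uint8 value v maps to v / 127.5 - 1, so 0 -> -1.0, 128 -> ~0.004, 255 -> 1.0;
        # with offset=False it is the plain v / 255 mapping into [0, 1]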

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        expected_output_video_shape = self.image_processor_tester.expected_output_image_shape([encoded_videos[0]])
        self.assertEqual(tuple(encoded_videos.shape), (1, *expected_output_video_shape))

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        expected_output_video_shape = self.image_processor_tester.expected_output_image_shape(encoded_videos)
        self.assertEqual(
            tuple(encoded_videos.shape), (self.image_processor_tester.batch_size, *expected_output_video_shape)
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        expected_output_video_shape = self.image_processor_tester.expected_output_image_shape([encoded_videos[0]])
        self.assertEqual(tuple(encoded_videos.shape), (1, *expected_output_video_shape))

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        expected_output_video_shape = self.image_processor_tester.expected_output_image_shape(encoded_videos)
        self.assertEqual(
            tuple(encoded_videos.shape), (self.image_processor_tester.batch_size, *expected_output_video_shape)
        )

    def test_call_numpy_4_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        self.image_processor_tester.num_channels = 4
        video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(
            video_inputs[0], return_tensors="pt", image_mean=0, image_std=1, input_data_format="channels_first"
        ).pixel_values
        expected_output_video_shape = self.image_processor_tester.expected_output_image_shape([encoded_videos[0]])
        self.assertEqual(tuple(encoded_videos.shape), (1, *expected_output_video_shape))

        # Test batched
        encoded_videos = image_processing(
            video_inputs, return_tensors="pt", image_mean=0, image_std=1, input_data_format="channels_first"
        ).pixel_values
        expected_output_video_shape = self.image_processor_tester.expected_output_image_shape(encoded_videos)
        self.assertEqual(
            tuple(encoded_videos.shape), (self.image_processor_tester.batch_size, *expected_output_video_shape)
        )
        self.image_processor_tester.num_channels = 3

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        expected_output_video_shape = self.image_processor_tester.expected_output_image_shape([encoded_videos[0]])
        self.assertEqual(tuple(encoded_videos.shape), (1, *expected_output_video_shape))

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        expected_output_video_shape = self.image_processor_tester.expected_output_image_shape(encoded_videos)
        self.assertEqual(
            tuple(encoded_videos.shape), (self.image_processor_tester.batch_size, *expected_output_video_shape)
        )
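
For context, a minimal sketch of the preprocessing these tests exercise (illustrative only, not part of the commit; it assumes the processor's default resize/crop settings):

    import numpy as np
    from transformers import VivitImageProcessor

    # a dummy 32-frame "video": a list of height x width x channels uint8 frames
    video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(32)]

    processor = VivitImageProcessor()
    inputs = processor(video, return_tensors="pt")
    # with the default shortest-edge resize and center crop this should be
    # (batch, frames, channels, height, width) = (1, 32, 3, 224, 224)
    print(inputs.pixel_values.shape)
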
389  transformers/tests/models/vivit/test_modeling_vivit.py             Normal file
@@ -0,0 +1,389 @@
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch ViViT model."""

import copy
import inspect
import unittest
from functools import cached_property

import numpy as np
from huggingface_hub import hf_hub_download

from transformers import VivitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import Expectations, require_torch, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VivitForVideoClassification, VivitModel


if is_vision_available():
    from transformers import VivitImageProcessor


class VivitModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_labels=True,
        num_labels=10,
        image_size=10,
        num_frames=8,  # decreased, because default 32 takes too much RAM at inference
        tubelet_size=[2, 4, 4],
        num_channels=3,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        scope=None,
        attn_implementation="eager",
        mask_ratio=0.5,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.scope = scope
        self.attn_implementation = attn_implementation

        self.seq_length = (
            (self.image_size // self.tubelet_size[2])
            * (self.image_size // self.tubelet_size[1])
            * (self.num_frames // self.tubelet_size[0])
        ) + 1  # CLS token
        self.mask_ratio = mask_ratio
        self.num_masks = int(mask_ratio * self.seq_length)
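        # with the defaults above: seq_length = (10 // 4) * (10 // 4) * (8 // 2) + 1 = 17
        # and num_masks = int(0.5 * 17) = 8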

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        config = VivitConfig(
            num_frames=self.num_frames,
            image_size=self.image_size,
            tubelet_size=self.tubelet_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            layer_norm_eps=self.layer_norm_eps,
            qkv_bias=self.qkv_bias,
            attn_implementation=self.attn_implementation,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = VivitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = VivitForVideoClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class VivitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as Vivit does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (VivitModel, VivitForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": VivitModel, "video-classification": VivitForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torch_exportable = True

    def setUp(self):
        self.model_tester = VivitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VivitConfig, has_text_modality=False, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="Vivit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_get_set_embeddings(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values", "head_mask"]
            self.assertListEqual(arg_names[:2], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model_name = "google/vivit-b-16x2-kinetics400"
        model = VivitModel.from_pretrained(model_name)
        self.assertIsNotNone(model)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            seq_len = self.model_tester.seq_length

            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class._from_config(config, attn_implementation="eager")
            config = model.config
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
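            # with the tester defaults this is [num_attention_heads, seq_len, seq_len] = [4, 17, 17]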
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)


# We will verify our results on a video of eating spaghetti
# Frame indices used: [164 168 172 176 181 185 189 193 198 202 206 210 215 219 223 227]
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti_32_frames.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)


@require_torch
@require_vision
class VivitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return VivitImageProcessor() if is_vision_available() else None

    @slow
    def test_inference_for_video_classification(self):
        model = VivitForVideoClassification.from_pretrained("google/vivit-b-16x2-kinetics400").to(torch_device)

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expectations = Expectations(
            {
                (None, None): [-0.9498, 2.7971, -1.4049, 0.1024, -1.8353],
                ("cuda", 8): [-0.9498, 2.7971, -1.4049, 0.1025, -1.8353],
            }
        )
        expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device)
        torch.testing.assert_close(outputs.logits[0, :5], expected_slice, rtol=2e-4, atol=2e-4)

    @slow
    def test_inference_interpolate_pos_encoding(self):
        # Vivit models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions. The DINO model by Facebook AI leverages this
        # to visualize self-attention on higher resolution images.
        model = VivitModel.from_pretrained("google/vivit-b-16x2-kinetics400").to(torch_device)

        image_processor = VivitImageProcessor.from_pretrained("google/vivit-b-16x2-kinetics400")
        video = prepare_video()
        inputs = image_processor(
            video, size={"shortest_edge": 480}, crop_size={"height": 232, "width": 232}, return_tensors="pt"
        )
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)

        # verify the shape of the last hidden state
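        # with a 232x232 crop, 32 frames and this checkpoint's tubelet size of [2, 16, 16],
        # the token count is (232 // 16) * (232 // 16) * (32 // 2) + 1 = 14 * 14 * 16 + 1 = 3137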
        expected_shape = torch.Size((1, 3137, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
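
For context, a minimal inference sketch mirroring the integration test above (illustrative only, not part of the commit; it assumes network access to the Hub and reuses the same checkpoint and sample video as the tests):

    import numpy as np
    import torch
    from huggingface_hub import hf_hub_download
    from transformers import VivitForVideoClassification, VivitImageProcessor

    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti_32_frames.npy", repo_type="dataset"
    )
    video = list(np.load(file))  # 32 frames as numpy arrays

    processor = VivitImageProcessor.from_pretrained("google/vivit-b-16x2-kinetics400")
    model = VivitForVideoClassification.from_pretrained("google/vivit-b-16x2-kinetics400")

    inputs = processor(video, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # shape (1, 400) for Kinetics-400

    print(model.config.id2label[logits.argmax(-1).item()])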