init
@@ -0,0 +1,661 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch InstructBlipVideo model."""

import inspect
import tempfile
import unittest

import numpy as np
from huggingface_hub import hf_hub_download

from transformers import (
    CONFIG_MAPPING,
    InstructBlipVideoConfig,
    InstructBlipVideoProcessor,
    InstructBlipVideoQFormerConfig,
    InstructBlipVideoVisionConfig,
)
from transformers.testing_utils import (
    require_accelerate,
    require_bitsandbytes,
    require_torch,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import is_torch_available

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
    ModelTesterMixin,
    floats_tensor,
    ids_tensor,
    random_attention_mask,
)


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        InstructBlipVideoForConditionalGeneration,
        InstructBlipVideoModel,
        InstructBlipVideoVisionModel,
    )


class InstructBlipVideoVisionModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        image_size=30,
        frames=4,
        patch_size=2,
        num_channels=3,
        is_training=True,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        initializer_range=1e-10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.frames = frames
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.scope = scope

        # in case of a vision transformer, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
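        # With the defaults above: num_patches = (30 // 2) ** 2 = 225, so seq_length = 225 + 1 = 226.
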
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size * self.frames, self.num_channels, self.image_size, self.image_size]
        )
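        # With the defaults this is a (12 * 4, 3, 30, 30) tensor: the frames are folded into the batch
        # dimension here and reshaped back to (batch, frames, C, H, W) by the composite model tester below.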
        config = self.get_config()

        return config, pixel_values

    def get_config(self):
        return InstructBlipVideoVisionConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values):
        model = InstructBlipVideoVisionModel(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size * self.frames, num_patches + 1, self.hidden_size)
        )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size * self.frames, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class InstructBlipVideoVisionModelTest(ModelTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as InstructBlipVideo's vision encoder
    does not use input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (InstructBlipVideoVisionModel,) if is_torch_available() else ()
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = InstructBlipVideoVisionModelTester(self)
        common_properties = ["num_query_tokens", "video_token_index"]
        self.config_tester = ConfigTester(
            self, config_class=InstructBlipVideoConfig, has_text_modality=False, common_properties=common_properties
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="InstructBlipVideo's vision encoder does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="InstructBlipVideo's vision encoder is an nn.Embeddings layer")
    def test_model_get_set_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        for model_class in self.all_model_classes:
            config, _ = self.model_tester.prepare_config_and_inputs_for_common()

            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(
        reason="InstructBlipVideoVisionModel is an internal building block, doesn't support standalone training"
    )
    def test_training(self):
        pass

    @unittest.skip(
        reason="InstructBlipVideoVisionModel is an internal building block, doesn't support standalone training"
    )
    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        model_name = "Salesforce/instructblip-vicuna-7b"
        model = InstructBlipVideoVisionModel.from_pretrained(model_name)
        self.assertIsNotNone(model)


class InstructBlipVideoQFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        qformer_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
            qformer_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        if input_mask is not None:
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
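
        # this produces right-padded attention masks: each row attends to a random-length prefix and masks the rest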
        config = self.get_config()

        return config, input_ids, input_mask, qformer_input_ids, qformer_attention_mask

    def get_config(self):
        return InstructBlipVideoQFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )


# this class is based on `OPTModelTester` found in tests/models/opt/test_modeling_opt.py
class InstructBlipVideoTextModelDecoderOnlyTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=100,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        embed_dim=16,
        num_labels=3,
        word_embed_proj_dim=16,
        type_sequence_label_size=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.num_labels = num_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False

    def prepare_config_and_inputs(self):
        config = self.get_config()

        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp(3)
        input_ids[:, -1] = self.eos_token_id  # Eos Token

        attention_mask = input_ids.ne(self.pad_token_id)

        return config, input_ids, attention_mask

    def get_config(self):
        return CONFIG_MAPPING["opt"](
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            embed_dim=self.embed_dim,
            is_encoder_decoder=False,
            word_embed_proj_dim=self.word_embed_proj_dim,
        )


# this model tester uses a decoder-only language model (OPT)
class InstructBlipVideoForConditionalGenerationDecoderOnlyModelTester:
    def __init__(
        self,
        parent,
        vision_kwargs=None,
        qformer_kwargs=None,
        text_kwargs=None,
        is_training=True,
        num_query_tokens=10,
        video_token_index=4,
    ):
        if vision_kwargs is None:
            vision_kwargs = {}
        if qformer_kwargs is None:
            qformer_kwargs = {}
        if text_kwargs is None:
            text_kwargs = {}

        self.parent = parent
        self.vision_model_tester = InstructBlipVideoVisionModelTester(parent, **vision_kwargs)
        self.qformer_model_tester = InstructBlipVideoQFormerModelTester(parent, **qformer_kwargs)
        self.text_model_tester = InstructBlipVideoTextModelDecoderOnlyTester(parent, **text_kwargs)
        self.batch_size = self.text_model_tester.batch_size  # need bs for batching_equivalence test
        self.frames = self.vision_model_tester.frames
        # need seq_length for common tests
        self.seq_length = self.text_model_tester.seq_length + (num_query_tokens * self.frames)
        self.is_training = is_training
        self.num_query_tokens = num_query_tokens
        self.video_token_index = video_token_index
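        # With the defaults, seq_length = 7 + 10 * 4 = 47: each of the 4 frames contributes
        # num_query_tokens placeholder positions on top of the 7 text tokens.
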
    def prepare_config_and_inputs(self):
        _, pixel_values = self.vision_model_tester.prepare_config_and_inputs()
        _, _, _, qformer_input_ids, qformer_attention_mask = self.qformer_model_tester.prepare_config_and_inputs()
        _, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs()
        _, c, h, w = pixel_values.shape
        pixel_values = pixel_values.reshape(-1, self.frames, c, h, w)

        vision_tokens = (
            torch.ones(
                (input_ids.shape[0], self.num_query_tokens * self.frames), device=torch_device, dtype=input_ids.dtype
            )
            * self.video_token_index
        )
        input_ids[input_ids == self.video_token_index] = self.text_model_tester.pad_token_id
        input_ids = torch.cat([vision_tokens, input_ids], dim=-1)
        vision_attention_mask = torch.ones_like(vision_tokens)
        attention_mask = torch.cat([vision_attention_mask, attention_mask], dim=-1)
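
        # At this point input_ids is [video placeholder tokens (num_query_tokens * frames) | text tokens],
        # so its length matches self.seq_length computed in __init__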
        config = self.get_config()

        return config, input_ids, attention_mask, qformer_input_ids, qformer_attention_mask, pixel_values

    def get_config(self):
        return InstructBlipVideoConfig.from_vision_qformer_text_configs(
            vision_config=self.vision_model_tester.get_config(),
            qformer_config=self.qformer_model_tester.get_config(),
            text_config=self.text_model_tester.get_config(),
            num_query_tokens=self.num_query_tokens,
            video_token_index=self.video_token_index,
        )

    def create_and_check_for_conditional_generation(
        self, config, input_ids, attention_mask, qformer_input_ids, qformer_attention_mask, pixel_values
    ):
        model = InstructBlipVideoForConditionalGeneration(config).to(torch_device).eval()
        with torch.no_grad():
            result = model(
                pixel_values,
                input_ids=input_ids,
                attention_mask=attention_mask,
                qformer_input_ids=qformer_input_ids,
                qformer_attention_mask=qformer_attention_mask,
            )

        expected_seq_length = (
            self.num_query_tokens * self.vision_model_tester.frames
        ) + self.text_model_tester.seq_length
        self.parent.assertEqual(
            result.logits.shape,
            (self.vision_model_tester.batch_size, expected_seq_length, self.text_model_tester.vocab_size),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, qformer_input_ids, qformer_attention_mask, pixel_values = config_and_inputs
        inputs_dict = {
            "pixel_values": pixel_values,
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "qformer_input_ids": qformer_input_ids,
            "qformer_attention_mask": qformer_attention_mask,
        }
        return config, inputs_dict


@require_torch
class InstructBlipVideoForConditionalGenerationDecoderOnlyTest(
    ModelTesterMixin, GenerationTesterMixin, unittest.TestCase
):
    all_model_classes = (
        (InstructBlipVideoForConditionalGeneration, InstructBlipVideoModel) if is_torch_available() else ()
    )
    additional_model_inputs = ["qformer_input_ids", "input_ids"]
    fx_compatible = False
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = True
    test_attention_outputs = False
    test_torchscript = False
    _is_composite = True

    def setUp(self):
        self.model_tester = InstructBlipVideoForConditionalGenerationDecoderOnlyModelTester(self)
        common_properties = ["num_query_tokens", "video_token_index"]
        self.config_tester = ConfigTester(
            self, config_class=InstructBlipVideoConfig, has_text_modality=False, common_properties=common_properties
        )

    def test_for_conditional_generation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_conditional_generation(*config_and_inputs)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(
        reason="InstructBlipVideoQFormerModel does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet."
    )
    def test_eager_matches_sdpa_generate(self):
        pass

    @unittest.skip(reason="Hidden_states is tested in individual model tests")
    def test_hidden_states_output(self):
        pass

    @unittest.skip(reason="InstructBlipVideoForConditionalGeneration doesn't support inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Tied weights are tested in individual model tests")
    def test_tied_weights_keys(self):
        pass

    @unittest.skip(reason="Retain_grad is tested in individual model tests")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip(reason="InstructBlipVideoModel does not have input/output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        for model_class in self.all_model_classes:
            config, _ = self.model_tester.prepare_config_and_inputs_for_common()
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_load_vision_qformer_text_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        # Save InstructBlipVideoConfig and check if we can load InstructBlipVideoVisionConfig from it
        with tempfile.TemporaryDirectory() as tmp_dir_name:
            config.save_pretrained(tmp_dir_name)
            vision_config = InstructBlipVideoVisionConfig.from_pretrained(tmp_dir_name)
            self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict())

        # Save InstructBlipVideoConfig and check if we can load InstructBlipVideoQFormerConfig from it
        with tempfile.TemporaryDirectory() as tmp_dir_name:
            config.save_pretrained(tmp_dir_name)
            qformer_config = InstructBlipVideoQFormerConfig.from_pretrained(tmp_dir_name)
            self.assertDictEqual(config.qformer_config.to_dict(), qformer_config.to_dict())

    @slow
    def test_model_from_pretrained(self):
        model_name = "Salesforce/instructblip-vicuna-7b"
        model = InstructBlipVideoForConditionalGeneration.from_pretrained(model_name)
        self.assertIsNotNone(model)

    # overwrite because InstructBlipVideo internally calls LM.generate() with embeds, thus it cannot operate in no-cache format
    def _check_generate_outputs(self, output, config, use_cache=False, num_return_sequences=1, num_beams=1):
        use_cache = True  # force this to be True in case False is passed
        super()._check_generate_outputs(
            output, config, use_cache=use_cache, num_return_sequences=num_return_sequences, num_beams=num_beams
        )

    def test_sdpa_can_dispatch_composite_models(self):
        """
        Tests whether composite models dispatch correctly to SDPA/eager attention when requested at load time.
        This test only looks at layer names, as SDPA layers are usually named "SDPAAttention".
        In contrast to the above test, this one checks whether "config._attn_implementation" is a dict after the
        model is loaded, because we manually replicate the requested attn implementation on each sub-config when
        loading. See https://github.com/huggingface/transformers/pull/32238 for more info.

        The test tries to cover the most general cases of composite models: VLMs with vision and text configs. Any
        model that has a different set of sub-configs has to overwrite this test.
        """
        if not self.has_attentions:
            self.skipTest(reason="Model architecture does not support attentions")

        if not self._is_composite:
            self.skipTest(f"{self.all_model_classes[0].__name__} does not support SDPA")

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model_sdpa = model_class.from_pretrained(tmpdirname)
                model_sdpa = model_sdpa.eval().to(torch_device)

                # The requested implementation is assigned to each sub-config; a sub-model dispatches to SDPA
                # only if it supports it (the Q-Former stays on eager)
                self.assertTrue(model.language_model.config._attn_implementation == "sdpa")
                self.assertTrue(model.vision_model.config._attn_implementation == "sdpa")
                self.assertTrue(model.qformer.config._attn_implementation == "eager")

                model_eager = model_class.from_pretrained(tmpdirname, attn_implementation="eager")
                model_eager = model_eager.eval().to(torch_device)
                self.assertTrue(model_eager.config._attn_implementation == "eager")
                self.assertTrue(model_eager.language_model.config._attn_implementation == "eager")
                self.assertTrue(model_eager.vision_model.config._attn_implementation == "eager")
                self.assertTrue(model_eager.qformer.config._attn_implementation == "eager")

                for name, submodule in model_eager.named_modules():
                    class_name = submodule.__class__.__name__
                    if (
                        class_name.endswith("Attention")
                        and getattr(submodule, "config", None)
                        and submodule.config._attn_implementation == "sdpa"
                    ):
                        raise ValueError("The eager model should not have SDPA attention layers")


# We will verify our results on a short demo video
def prepare_video():
    video_file = hf_hub_download(
        repo_id="raushan-testing-hf/videos-test", filename="video_demo.npy", repo_type="dataset"
    )
    video = np.load(video_file)[::2]  # sample every 2nd frame to get 4 frames total
    return video

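
# The [::2] subsampling above implies the source file stores 8 frames; the resulting 4-frame clip matches the
# 4-frame input this model family uses (the exact frame layout of the demo file is not asserted by the test).

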
@require_vision
@require_torch
@require_bitsandbytes
@require_accelerate
@slow
class InstructBlipVideoModelIntegrationTest(unittest.TestCase):
    def test_inference_vicuna_7b(self):
        processor = InstructBlipVideoProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")
        model = InstructBlipVideoForConditionalGeneration.from_pretrained(
            "Salesforce/instructblip-vicuna-7b",
            load_in_8bit=True,
        )

        clip = prepare_video()
        prompt = "Explain what is happening in this short video."
        inputs = processor(images=clip, text=prompt, return_tensors="pt").to(torch_device, torch.float16)

        # verify generation
        outputs = model.generate(**inputs, max_new_tokens=30)
        generated_text = processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
        self.assertEqual(
            generated_text,
            "Explain what is happening in this short video. a baby girl wearing glasses is reading a book on the bed 1080p",
        )
@@ -0,0 +1,185 @@
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
import tempfile
import unittest

import pytest

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torchvision_available, is_vision_available

from ...test_processing_common import ProcessorTesterMixin


if is_vision_available():
    from transformers import (
        AutoProcessor,
        BertTokenizerFast,
        GPT2Tokenizer,
        InstructBlipVideoProcessor,
        PreTrainedTokenizerFast,
    )

if is_torchvision_available():
    from transformers import InstructBlipVideoVideoProcessor


@require_vision
@require_torch
class InstructBlipVideoProcessorTest(ProcessorTesterMixin, unittest.TestCase):
    processor_class = InstructBlipVideoProcessor

    @classmethod
    def setUpClass(cls):
        cls.tmpdirname = tempfile.mkdtemp()

        video_processor = InstructBlipVideoVideoProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")

        processor = InstructBlipVideoProcessor(video_processor, tokenizer, qformer_tokenizer)

        processor.save_pretrained(cls.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def prepare_processor_dict(self):
        return {"num_query_tokens": 1}
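    # num_query_tokens tells the processor how many placeholder positions to reserve per video frame in the
    # text prompt; using 1 keeps the expanded prompt short for these tests.
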
    def get_video_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).video_processor

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdirname, ignore_errors=True)

    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipVideoProcessor(
            tokenizer=self.get_tokenizer(),
            video_processor=self.get_video_processor(),
            qformer_tokenizer=self.get_qformer_tokenizer(),
        )
        with tempfile.TemporaryDirectory() as tmpdir:
            processor.save_pretrained(tmpdir)

            tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
            video_processor_add_kwargs = self.get_video_processor(do_normalize=False, padding_value=1.0)

            processor = InstructBlipVideoProcessor.from_pretrained(
                tmpdir, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
            )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.video_processor.to_json_string(), video_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.video_processor, InstructBlipVideoVideoProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, BertTokenizerFast)

    def test_video_processor(self):
        video_processor = self.get_video_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor_kwargs = self.prepare_processor_dict()

        processor = InstructBlipVideoProcessor(
            tokenizer=tokenizer,
            video_processor=video_processor,
            qformer_tokenizer=qformer_tokenizer,
            **processor_kwargs,
        )

        image_input = self.prepare_image_inputs()

        input_feat_extract = video_processor(image_input, return_tensors="pt")
        input_processor = processor(images=image_input, return_tensors="pt")

        for key in input_feat_extract:
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        video_processor = self.get_video_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor_kwargs = self.prepare_processor_dict()

        processor = InstructBlipVideoProcessor(
            tokenizer=tokenizer,
            video_processor=video_processor,
            qformer_tokenizer=qformer_tokenizer,
            **processor_kwargs,
        )

        input_str = ["lower newer"]
        encoded_processor = processor(text=input_str)
        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tokens:
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])

        for key in encoded_tokens_qformer:
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key])

    def test_processor(self):
        video_processor = self.get_video_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor_kwargs = self.prepare_processor_dict()

        processor = InstructBlipVideoProcessor(
            tokenizer=tokenizer,
            video_processor=video_processor,
            qformer_tokenizer=qformer_tokenizer,
            **processor_kwargs,
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["qformer_input_ids", "qformer_attention_mask", "input_ids", "attention_mask", "pixel_values"],
        )

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        video_processor = self.get_video_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor_kwargs = self.prepare_processor_dict()

        processor = InstructBlipVideoProcessor(
            tokenizer=tokenizer,
            video_processor=video_processor,
            qformer_tokenizer=qformer_tokenizer,
            **processor_kwargs,
        )

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
@@ -0,0 +1,116 @@
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from transformers.image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torchvision_available, is_vision_available

from ...test_video_processing_common import VideoProcessingTestMixin, prepare_video_inputs


if is_vision_available():
    if is_torchvision_available():
        from transformers import InstructBlipVideoVideoProcessor


class InstructBlipVideoVideoProcessingTester:
    def __init__(
        self,
        parent,
        batch_size=5,
        num_channels=3,
        num_frames=4,
        min_resolution=30,
        max_resolution=80,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=OPENAI_CLIP_MEAN,
        image_std=OPENAI_CLIP_STD,
        do_convert_rgb=True,
    ):
        super().__init__()
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_frames = num_frames
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_video_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def expected_output_video_shape(self, images):
        return self.num_frames, self.num_channels, self.size["height"], self.size["width"]
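    # With the tester defaults, expected_output_video_shape(...) is (4, 3, 18, 18): num_frames, num_channels,
    # and the 18x18 target size.
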
    def prepare_video_inputs(self, equal_resolution=False, return_tensors="pil"):
        videos = prepare_video_inputs(
            batch_size=self.batch_size,
            num_frames=self.num_frames,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            return_tensors=return_tensors,
        )

        return videos


@require_torch
@require_vision
class InstructBlipVideoProcessingTest(VideoProcessingTestMixin, unittest.TestCase):
    fast_video_processing_class = InstructBlipVideoVideoProcessor if is_torchvision_available() else None
    input_name = "pixel_values"

    def setUp(self):
        super().setUp()
        self.video_processor_tester = InstructBlipVideoVideoProcessingTester(self)

    @property
    def video_processor_dict(self):
        return self.video_processor_tester.prepare_video_processor_dict()

    def test_image_processor_properties(self):
        video_processing = self.fast_video_processing_class(**self.video_processor_dict)
        self.assertTrue(hasattr(video_processing, "do_resize"))
        self.assertTrue(hasattr(video_processing, "size"))
        self.assertTrue(hasattr(video_processing, "do_normalize"))
        self.assertTrue(hasattr(video_processing, "image_mean"))
        self.assertTrue(hasattr(video_processing, "image_std"))
        self.assertTrue(hasattr(video_processing, "do_convert_rgb"))

    def test_video_processor_from_dict_with_kwargs(self):
        video_processor = self.fast_video_processing_class.from_dict(self.video_processor_dict)
        self.assertEqual(video_processor.size, {"height": 18, "width": 18})

        video_processor = self.fast_video_processing_class.from_dict(self.video_processor_dict, size=42)
        self.assertEqual(video_processor.size, {"height": 42, "width": 42})