init

transformers/tests/models/glm4v/__init__.py (new file, 0 lines)

transformers/tests/models/glm4v/test_image_processing_glm4v.py (new file, 254 lines)
@@ -0,0 +1,254 @@
|
||||
# Copyright 2021 HuggingFace Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
import unittest
|
||||
|
||||
import numpy as np
|
||||
|
||||
from transformers.testing_utils import require_torch, require_vision
|
||||
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available
|
||||
|
||||
from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
|
||||
|
||||
|
||||
if is_torch_available():
|
||||
import torch
|
||||
|
||||
|
||||
if is_vision_available():
|
||||
from PIL import Image
|
||||
|
||||
from transformers import Glm4vImageProcessor
|
||||
from transformers.models.glm4v.image_processing_glm4v import smart_resize
|
||||
|
||||
if is_torchvision_available():
|
||||
from transformers import Glm4vImageProcessorFast
|
||||
|
||||
|
||||
class Glm4vImageProcessingTester:
|
||||
def __init__(
|
||||
self,
|
||||
parent,
|
||||
batch_size=7,
|
||||
num_channels=3,
|
||||
min_resolution=30,
|
||||
max_resolution=80,
|
||||
do_resize=True,
|
||||
size=None,
|
||||
do_normalize=True,
|
||||
image_mean=[0.5, 0.5, 0.5],
|
||||
image_std=[0.5, 0.5, 0.5],
|
||||
temporal_patch_size=2,
|
||||
patch_size=14,
|
||||
merge_size=2,
|
||||
):
|
||||
size = size if size is not None else {"longest_edge": 20, "shortest_edge": 10}
|
||||
self.parent = parent
|
||||
self.batch_size = batch_size
|
||||
self.num_channels = num_channels
|
||||
self.min_resolution = min_resolution
|
||||
self.max_resolution = max_resolution
|
||||
self.do_resize = do_resize
|
||||
self.size = size
|
||||
self.do_normalize = do_normalize
|
||||
self.image_mean = image_mean
|
||||
self.image_std = image_std
|
||||
self.temporal_patch_size = temporal_patch_size
|
||||
self.patch_size = patch_size
|
||||
self.merge_size = merge_size
|
||||
|
||||
def prepare_image_processor_dict(self):
|
||||
return {
|
||||
"image_mean": self.image_mean,
|
||||
"image_std": self.image_std,
|
||||
"do_normalize": self.do_normalize,
|
||||
"do_resize": self.do_resize,
|
||||
"size": self.size,
|
||||
"temporal_patch_size": self.temporal_patch_size,
|
||||
"patch_size": self.patch_size,
|
||||
"merge_size": self.merge_size,
|
||||
}
|
||||
|
||||
def expected_output_image_shape(self, images):
|
||||
grid_t = 1
|
||||
hidden_dim = self.num_channels * self.temporal_patch_size * self.patch_size * self.patch_size
|
||||
seq_len = 0
|
||||
for image in images:
|
||||
if isinstance(image, list) and isinstance(image[0], Image.Image):
|
||||
image = np.stack([np.array(frame) for frame in image])
|
||||
elif hasattr(image, "shape"):
|
||||
pass
|
||||
else:
|
||||
image = np.array(image)
|
||||
if hasattr(image, "shape") and len(image.shape) >= 3:
|
||||
if isinstance(image, np.ndarray):
|
||||
if len(image.shape) == 4:
|
||||
height, width = image.shape[1:3]
|
||||
elif len(image.shape) == 3:
|
||||
height, width = image.shape[:2]
|
||||
else:
|
||||
height, width = self.min_resolution, self.min_resolution
|
||||
else:
|
||||
height, width = image.shape[-2:]
|
||||
else:
|
||||
height, width = self.min_resolution, self.min_resolution
|
||||
|
||||
resized_height, resized_width = smart_resize(
|
||||
self.temporal_patch_size,
|
||||
height,
|
||||
width,
|
||||
factor=self.patch_size * self.merge_size,
|
||||
min_pixels=self.size["shortest_edge"],
|
||||
max_pixels=self.size["longest_edge"],
|
||||
)
|
||||
grid_h, grid_w = resized_height // self.patch_size, resized_width // self.patch_size
|
||||
seq_len += grid_t * grid_h * grid_w
|
||||
return (seq_len, hidden_dim)
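# Illustrative arithmetic with the defaults above (a sketch, for reading the test):
# hidden_dim = 3 * 2 * 14 * 14 = 1176, and each image contributes
# grid_t * grid_h * grid_w = 1 * (resized_height // 14) * (resized_width // 14) rows,
# so the processor is expected to return a flat (seq_len, 1176) patch matrix rather
# than a (batch, channels, height, width) tensor.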
|
||||
|
||||
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
|
||||
return prepare_image_inputs(
|
||||
batch_size=self.batch_size,
|
||||
num_channels=self.num_channels,
|
||||
min_resolution=self.min_resolution,
|
||||
max_resolution=self.max_resolution,
|
||||
equal_resolution=equal_resolution,
|
||||
numpify=numpify,
|
||||
torchify=torchify,
|
||||
)
|
||||
|
||||
|
||||
@require_torch
|
||||
@require_vision
|
||||
class Glm4vImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
|
||||
image_processing_class = Glm4vImageProcessor if is_vision_available() else None
|
||||
fast_image_processing_class = Glm4vImageProcessorFast if is_torchvision_available() else None
|
||||
|
||||
def setUp(self):
|
||||
super().setUp()
|
||||
self.image_processor_tester = Glm4vImageProcessingTester(self)
|
||||
|
||||
@property
|
||||
def image_processor_dict(self):
|
||||
return self.image_processor_tester.prepare_image_processor_dict()
|
||||
|
||||
def test_image_processor_properties(self):
|
||||
for image_processing_class in self.image_processor_list:
|
||||
image_processing = image_processing_class(**self.image_processor_dict)
|
||||
self.assertTrue(hasattr(image_processing, "image_mean"))
|
||||
self.assertTrue(hasattr(image_processing, "image_std"))
|
||||
self.assertTrue(hasattr(image_processing, "do_normalize"))
|
||||
self.assertTrue(hasattr(image_processing, "do_resize"))
|
||||
self.assertTrue(hasattr(image_processing, "size"))
|
||||
|
||||
def test_image_processor_from_dict_with_kwargs(self):
|
||||
for image_processing_class in self.image_processor_list:
|
||||
image_processor = image_processing_class.from_dict(self.image_processor_dict)
|
||||
self.assertEqual(image_processor.size, {"shortest_edge": 10, "longest_edge": 20})
|
||||
|
||||
image_processor = image_processing_class.from_dict(
|
||||
self.image_processor_dict, size={"shortest_edge": 42, "longest_edge": 42}
|
||||
)
|
||||
self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 42})
|
||||
|
||||
# batch size is flattened
|
||||
def test_call_pil(self):
|
||||
for image_processing_class in self.image_processor_list:
|
||||
# Initialize image_processing
|
||||
image_processing = image_processing_class(**self.image_processor_dict)
|
||||
# create random PIL images
|
||||
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False)
|
||||
for image in image_inputs:
|
||||
self.assertIsInstance(image, Image.Image)
|
||||
|
||||
# Test not batched input
|
||||
encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
|
||||
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]])
|
||||
self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)
|
||||
|
||||
# Test batched
|
||||
encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
|
||||
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs)
|
||||
self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)
|
||||
|
||||
def test_call_numpy(self):
|
||||
for image_processing_class in self.image_processor_list:
|
||||
# Initialize image_processing
|
||||
image_processing = image_processing_class(**self.image_processor_dict)
|
||||
# create random numpy tensors
|
||||
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True)
|
||||
for image in image_inputs:
|
||||
self.assertIsInstance(image, np.ndarray)
|
||||
|
||||
# Test not batched input
|
||||
encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
|
||||
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]])
|
||||
self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)
|
||||
|
||||
# Test batched
|
||||
encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
|
||||
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs)
|
||||
self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)
|
||||
|
||||
def test_call_pytorch(self):
|
||||
for image_processing_class in self.image_processor_list:
|
||||
# Initialize image_processing
|
||||
image_processing = image_processing_class(**self.image_processor_dict)
|
||||
# create random PyTorch tensors
|
||||
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
|
||||
|
||||
for image in image_inputs:
|
||||
self.assertIsInstance(image, torch.Tensor)
|
||||
|
||||
# Test not batched input
|
||||
encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
|
||||
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]])
|
||||
self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)
|
||||
|
||||
# Test batched
|
||||
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs)
|
||||
encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
|
||||
self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)
|
||||
|
||||
def test_call_numpy_4_channels(self):
|
||||
for image_processing_class in self.image_processor_list:
|
||||
# Test that can process images which have an arbitrary number of channels
|
||||
# Initialize image_processing
|
||||
image_processor = image_processing_class(**self.image_processor_dict)
|
||||
|
||||
# create random numpy tensors
|
||||
self.image_processor_tester.num_channels = 4
|
||||
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True)
|
||||
|
||||
# Test not batched input
|
||||
encoded_images = image_processor(
|
||||
image_inputs[0],
|
||||
return_tensors="pt",
|
||||
input_data_format="channels_last",
|
||||
image_mean=0,
|
||||
image_std=1,
|
||||
).pixel_values
|
||||
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]])
|
||||
self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)
|
||||
|
||||
# Test batched
|
||||
encoded_images = image_processor(
|
||||
image_inputs,
|
||||
return_tensors="pt",
|
||||
input_data_format="channels_last",
|
||||
image_mean=0,
|
||||
image_std=1,
|
||||
).pixel_values
|
||||
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs)
|
||||
self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)
|
||||
transformers/tests/models/glm4v/test_modeling_glm4v.py (new file, 578 lines)
@@ -0,0 +1,578 @@
|
||||
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""Testing suite for the PyTorch GLM-4.1V model."""
|
||||
|
||||
import copy
|
||||
import unittest
|
||||
|
||||
from transformers import (
|
||||
AutoProcessor,
|
||||
Glm4vConfig,
|
||||
Glm4vForConditionalGeneration,
|
||||
Glm4vModel,
|
||||
is_torch_available,
|
||||
)
|
||||
from transformers.testing_utils import (
|
||||
cleanup,
|
||||
require_flash_attn,
|
||||
require_torch,
|
||||
require_torch_gpu,
|
||||
slow,
|
||||
torch_device,
|
||||
)
|
||||
|
||||
from ...generation.test_utils import GenerationTesterMixin
|
||||
from ...test_configuration_common import ConfigTester
|
||||
from ...test_modeling_common import (
|
||||
ModelTesterMixin,
|
||||
floats_tensor,
|
||||
ids_tensor,
|
||||
)
|
||||
|
||||
|
||||
if is_torch_available():
|
||||
import torch
|
||||
|
||||
|
||||
class Glm4vVisionText2TextModelTester:
|
||||
def __init__(
|
||||
self,
|
||||
parent,
|
||||
batch_size=3,
|
||||
seq_length=7,
|
||||
num_channels=3,
|
||||
ignore_index=-100,
|
||||
image_size=112,
|
||||
video_start_token_id=3,
|
||||
video_end_token_id=4,
|
||||
image_start_token_id=5,
|
||||
image_end_token_id=6,
|
||||
image_token_id=7,
|
||||
video_token_id=8,
|
||||
is_training=True,
|
||||
text_config={
|
||||
"vocab_size": 99,
|
||||
"hidden_size": 16,
|
||||
"intermediate_size": 22,
|
||||
"num_hidden_layers": 2,
|
||||
"num_attention_heads": 2,
|
||||
"num_key_value_heads": 1,
|
||||
"output_channels": 64,
|
||||
"hidden_act": "silu",
|
||||
"max_position_embeddings": 512,
|
||||
"rope_scaling": {"type": "default", "mrope_section": [2, 1, 1]},
|
||||
"rope_theta": 10000,
|
||||
"tie_word_embeddings": True,
|
||||
"bos_token_id": 0,
|
||||
"eos_token_id": 0,
|
||||
"pad_token_id": 0,
|
||||
},
|
||||
vision_config={
|
||||
"depth": 2,
|
||||
"hidden_act": "silu",
|
||||
"hidden_size": 48,
|
||||
"out_hidden_size": 16,
|
||||
"intermediate_size": 22,
|
||||
"patch_size": 14,
|
||||
"spatial_merge_size": 1,
|
||||
"temporal_patch_size": 2,
|
||||
},
|
||||
):
|
||||
self.parent = parent
|
||||
self.ignore_index = ignore_index
|
||||
self.bos_token_id = text_config["bos_token_id"]
|
||||
self.eos_token_id = text_config["eos_token_id"]
|
||||
self.pad_token_id = text_config["pad_token_id"]
|
||||
self.video_start_token_id = video_start_token_id
|
||||
self.video_end_token_id = video_end_token_id
|
||||
self.image_start_token_id = image_start_token_id
|
||||
self.image_end_token_id = image_end_token_id
|
||||
self.image_token_id = image_token_id
|
||||
self.video_token_id = video_token_id
|
||||
self.text_config = text_config
|
||||
self.vision_config = vision_config
|
||||
self.batch_size = batch_size
|
||||
self.num_channels = num_channels
|
||||
self.image_size = image_size
|
||||
self.is_training = is_training
|
||||
self.hidden_size = text_config["hidden_size"]
|
||||
self.num_hidden_layers = text_config["num_hidden_layers"]
|
||||
self.num_attention_heads = text_config["num_attention_heads"]
|
||||
self.vocab_size = text_config["vocab_size"]
|
||||
self.num_image_tokens = 64
|
||||
self.seq_length = seq_length + self.num_image_tokens
|
||||
|
||||
def get_config(self):
|
||||
return Glm4vConfig(
|
||||
text_config=self.text_config,
|
||||
vision_config=self.vision_config,
|
||||
image_token_id=self.image_token_id,
|
||||
video_token_id=self.video_token_id,
|
||||
video_start_token_id=self.video_start_token_id,
|
||||
video_end_token_id=self.video_end_token_id,
|
||||
image_start_token_id=self.image_start_token_id,
|
||||
image_end_token_id=self.image_end_token_id,
|
||||
)
|
||||
|
||||
def prepare_config_and_inputs(self):
|
||||
config = self.get_config()
|
||||
patch_size = config.vision_config.patch_size
|
||||
temporal_patch_size = config.vision_config.temporal_patch_size
|
||||
pixel_values = floats_tensor(
|
||||
[
|
||||
self.batch_size * (self.image_size**2) // (patch_size**2),
|
||||
self.num_channels * (patch_size**2) * temporal_patch_size,
|
||||
]
|
||||
)
|
||||
|
||||
return config, pixel_values
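# Illustrative arithmetic with the defaults above: pixel_values has shape
# (3 * 112**2 // 14**2, 3 * 14**2 * 2) = (192, 1176), i.e. 64 flattened patches per
# image, which is what num_image_tokens = 64 accounts for when the image placeholder
# tokens are spliced into input_ids below.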
|
||||
|
||||
def prepare_config_and_inputs_for_common(self):
|
||||
config_and_inputs = self.prepare_config_and_inputs()
|
||||
config, pixel_values = config_and_inputs
|
||||
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
|
||||
attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
|
||||
|
||||
input_ids[input_ids == self.video_token_id] = self.pad_token_id
|
||||
input_ids[input_ids == self.image_token_id] = self.pad_token_id
|
||||
input_ids[input_ids == self.video_start_token_id] = self.pad_token_id
|
||||
input_ids[input_ids == self.image_start_token_id] = self.pad_token_id
|
||||
input_ids[input_ids == self.video_end_token_id] = self.pad_token_id
|
||||
input_ids[input_ids == self.image_end_token_id] = self.pad_token_id
|
||||
|
||||
input_ids[:, 0] = self.image_start_token_id
|
||||
input_ids[:, 1 : 1 + self.num_image_tokens] = self.image_token_id
|
||||
input_ids[:, 1 + self.num_image_tokens] = self.image_end_token_id
|
||||
patch_size = config.vision_config.patch_size
|
||||
patches_per_side = self.image_size // patch_size
|
||||
|
||||
inputs_dict = {
|
||||
"pixel_values": pixel_values,
|
||||
"image_grid_thw": torch.tensor(
|
||||
[[1, patches_per_side, patches_per_side]] * self.batch_size, device=torch_device
|
||||
),
|
||||
"input_ids": input_ids,
|
||||
"attention_mask": attention_mask,
|
||||
}
|
||||
return config, inputs_dict
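# Resulting per-row layout of input_ids (sketch): [image_start] + 64 * [image_token]
# + [image_end] followed by ordinary text tokens, giving seq_length = 7 + 64 = 71,
# while image_grid_thw advertises one 1 x 8 x 8 patch grid (112 // 14 = 8) per sample.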
|
||||
|
||||
|
||||
@require_torch
|
||||
class Glm4vModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
|
||||
all_model_classes = (Glm4vModel, Glm4vForConditionalGeneration) if is_torch_available() else ()
|
||||
test_pruning = False
|
||||
test_head_masking = False
|
||||
test_torchscript = False
|
||||
model_split_percents = [0.7, 0.9] # model too big to split at 0.5
|
||||
_is_composite = True
|
||||
|
||||
def setUp(self):
|
||||
self.model_tester = Glm4vVisionText2TextModelTester(self)
|
||||
self.config_tester = ConfigTester(self, config_class=Glm4vConfig, has_text_modality=False)
|
||||
|
||||
def test_config(self):
|
||||
self.config_tester.run_common_tests()
|
||||
|
||||
# GLM4V has images shaped as (bs*patch_len, dim) so we can't slice to batches in generate
|
||||
def prepare_config_and_inputs_for_generate(self, batch_size=2):
|
||||
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
|
||||
|
||||
# There are a few model inputs we don't want in the model input dictionary for generation tests
|
||||
input_keys_to_ignore = [
|
||||
# we don't want to mask attention heads
|
||||
"head_mask",
|
||||
"decoder_head_mask",
|
||||
"cross_attn_head_mask",
|
||||
# we don't want encoder-decoder models to start from filled decoder ids
|
||||
"decoder_input_ids",
|
||||
"decoder_attention_mask",
|
||||
# we'll set cache use in each test differently
|
||||
"use_cache",
|
||||
# Ignore labels if it is in the input dict
|
||||
"labels",
|
||||
# model-specific exceptions should overload/overwrite this function
|
||||
]
|
||||
|
||||
# The diff from the general `prepare_config_and_inputs_for_generate` lies here
|
||||
patch_size = config.vision_config.patch_size
|
||||
filtered_image_length = batch_size * (self.model_tester.image_size**2) // (patch_size**2)
|
||||
filtered_inputs_dict = {
|
||||
k: v[:batch_size, ...] if isinstance(v, torch.Tensor) else v
|
||||
for k, v in inputs_dict.items()
|
||||
if k not in input_keys_to_ignore
|
||||
}
|
||||
filtered_inputs_dict["pixel_values"] = inputs_dict["pixel_values"][:filtered_image_length]
|
||||
|
||||
# It is important to set `eos_token_id` to `None` to avoid early stopping (would break for length-based checks)
|
||||
text_gen_config = config.get_text_config(decoder=True)
|
||||
if text_gen_config.eos_token_id is not None and text_gen_config.pad_token_id is None:
|
||||
text_gen_config.pad_token_id = (
|
||||
text_gen_config.eos_token_id
|
||||
if isinstance(text_gen_config.eos_token_id, int)
|
||||
else text_gen_config.eos_token_id[0]
|
||||
)
|
||||
text_gen_config.eos_token_id = None
|
||||
text_gen_config.forced_eos_token_id = None
|
||||
|
||||
return config, filtered_inputs_dict
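# Sketch of the slicing above with this tester's defaults: pixel_values rows are grouped
# per image, so trimming to batch_size = 2 keeps 2 * 112**2 // 14**2 = 128 rows instead
# of indexing a batch dimension, while the other tensors use the usual v[:batch_size] slice.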
|
||||
|
||||
@unittest.skip(reason="No available kernels - not supported")
|
||||
def test_sdpa_can_dispatch_on_flash(self):
|
||||
pass
|
||||
|
||||
@unittest.skip(reason="Size mismatch")
|
||||
def test_multi_gpu_data_parallel_forward(self):
|
||||
pass
|
||||
|
||||
@unittest.skip("Error with compilation")
|
||||
def test_generate_from_inputs_embeds_with_static_cache(self):
|
||||
pass
|
||||
|
||||
def test_inputs_embeds(self):
|
||||
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
|
||||
|
||||
for model_class in self.all_model_classes:
|
||||
model = model_class(config)
|
||||
model.to(torch_device)
|
||||
model.eval()
|
||||
|
||||
inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
|
||||
|
||||
input_ids = inputs["input_ids"]
|
||||
del inputs["input_ids"]
|
||||
del inputs["pixel_values"]
|
||||
del inputs["image_grid_thw"]
|
||||
|
||||
wte = model.get_input_embeddings()
|
||||
inputs["inputs_embeds"] = wte(input_ids)
|
||||
with torch.no_grad():
|
||||
model(**inputs)[0]
|
||||
|
||||
def test_inputs_embeds_matches_input_ids(self):
|
||||
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
|
||||
|
||||
for model_class in self.all_model_classes:
|
||||
model = model_class(config)
|
||||
model.to(torch_device)
|
||||
model.eval()
|
||||
|
||||
inputs = self._prepare_for_class(inputs_dict, model_class)
|
||||
input_ids = inputs["input_ids"]
|
||||
del inputs["input_ids"]
|
||||
del inputs["pixel_values"]
|
||||
del inputs["image_grid_thw"]
|
||||
|
||||
inputs_embeds = model.get_input_embeddings()(input_ids)
|
||||
|
||||
with torch.no_grad():
|
||||
out_ids = model(input_ids=input_ids, **inputs)[0]
|
||||
out_embeds = model(inputs_embeds=inputs_embeds, **inputs)[0]
|
||||
torch.testing.assert_close(out_embeds, out_ids)
|
||||
|
||||
|
||||
@require_torch
|
||||
class Glm4vIntegrationTest(unittest.TestCase):
|
||||
def setUp(self):
|
||||
cleanup(torch_device, gc_collect=True)
|
||||
|
||||
self.processor = AutoProcessor.from_pretrained("THUDM/GLM-4.1V-9B-Thinking")
|
||||
self.message = [
|
||||
{
|
||||
"role": "user",
|
||||
"content": [
|
||||
{
|
||||
"type": "image",
|
||||
"url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg",
|
||||
},
|
||||
{"type": "text", "text": "What kind of dog is this?"},
|
||||
],
|
||||
}
|
||||
]
|
||||
self.message2 = [
|
||||
{
|
||||
"role": "user",
|
||||
"content": [
|
||||
{
|
||||
"type": "image",
|
||||
"url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/coco_sample.png",
|
||||
},
|
||||
{"type": "text", "text": "What kind of dog is this?"},
|
||||
],
|
||||
}
|
||||
]
|
||||
|
||||
def tearDown(self):
|
||||
cleanup(torch_device, gc_collect=True)
|
||||
|
||||
@slow
|
||||
def test_small_model_integration_test(self):
|
||||
model = Glm4vForConditionalGeneration.from_pretrained(
|
||||
"THUDM/GLM-4.1V-9B-Thinking", dtype="auto", device_map="auto"
|
||||
)
|
||||
|
||||
inputs = self.processor.apply_chat_template(
|
||||
self.message, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt"
|
||||
)
|
||||
expected_input_ids = [151331, 151333, 151336, 198, 151339, 151343, 151343, 151343, 151343, 151343, 151343, 151343, 151343, 151343, 151343, 151343, 151343] # fmt: skip
|
||||
assert expected_input_ids == inputs.input_ids[0].tolist()[:17]
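# The repeated trailing id (151343) is presumably the expanded image placeholder token,
# so the assertion above only pins the first 17 prompt ids rather than the full sequence.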
|
||||
|
||||
expected_pixel_slice = torch.tensor(
|
||||
[
|
||||
[-0.0988, -0.0842, -0.0842],
|
||||
[-0.5660, -0.5514, -0.4200],
|
||||
[-0.0259, -0.0259, -0.0259],
|
||||
[-0.1280, -0.0988, -0.2010],
|
||||
[-0.4638, -0.5806, -0.6974],
|
||||
[-1.2083, -1.2229, -1.2083],
|
||||
],
|
||||
dtype=torch.float32,
|
||||
device="cpu",
|
||||
)
|
||||
assert torch.allclose(expected_pixel_slice, inputs.pixel_values[:6, :3], atol=3e-3)
|
||||
|
||||
# verify generation
|
||||
inputs = inputs.to(torch_device)
|
||||
|
||||
# This model on the hub has `do_sample=True`.
|
||||
torch.manual_seed(42)
|
||||
|
||||
output = model.generate(**inputs, max_new_tokens=30)
|
||||
EXPECTED_DECODED_TEXT = "\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture doesn't look like a dog; it's actually a cat. Specifically"
|
||||
self.assertEqual(
|
||||
self.processor.decode(output[0], skip_special_tokens=True),
|
||||
EXPECTED_DECODED_TEXT,
|
||||
)
|
||||
|
||||
@slow
|
||||
def test_small_model_integration_test_batch(self):
|
||||
model = Glm4vForConditionalGeneration.from_pretrained(
|
||||
"THUDM/GLM-4.1V-9B-Thinking", dtype="auto", device_map="auto"
|
||||
)
|
||||
batch_messages = [self.message] * 2
|
||||
inputs = self.processor.apply_chat_template(
|
||||
batch_messages, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt"
|
||||
).to(torch_device)
|
||||
|
||||
# This model on the hub has `do_sample=True`.
|
||||
torch.manual_seed(42)
|
||||
|
||||
# it should not matter whether two images are the same size or not
|
||||
output = model.generate(**inputs, max_new_tokens=30)
|
||||
|
||||
EXPECTED_DECODED_TEXT = [
|
||||
"\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture doesn't look like a dog; it's actually a cat. Specifically",
|
||||
"\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture has a stocky body, thick fur, and a face that's"
|
||||
] # fmt: skip
|
||||
self.assertEqual(
|
||||
self.processor.batch_decode(output, skip_special_tokens=True),
|
||||
EXPECTED_DECODED_TEXT,
|
||||
)
|
||||
|
||||
@slow
|
||||
def test_small_model_integration_test_with_video(self):
|
||||
processor = AutoProcessor.from_pretrained("THUDM/GLM-4.1V-9B-Thinking", max_image_size={"longest_edge": 50176})
|
||||
model = Glm4vForConditionalGeneration.from_pretrained(
|
||||
"THUDM/GLM-4.1V-9B-Thinking", dtype=torch.float16, device_map="auto"
|
||||
)
|
||||
questions = ["Describe this video."]
|
||||
video_urls = ["https://huggingface.co/datasets/hf-internal-testing/fixtures_videos/resolve/main/tennis.mp4"]
|
||||
messages = [
|
||||
[
|
||||
{
|
||||
"role": "user",
|
||||
"content": [
|
||||
{
|
||||
"type": "video",
|
||||
"video": video_url,
|
||||
},
|
||||
{"type": "text", "text": question},
|
||||
],
|
||||
}
|
||||
]
|
||||
for question, video_url in zip(questions, video_urls)
|
||||
]
|
||||
inputs = processor.apply_chat_template(
|
||||
messages, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt", padding=True
|
||||
).to(torch_device)
|
||||
|
||||
# This model on the hub has `do_sample=True`.
|
||||
torch.manual_seed(42)
|
||||
|
||||
output = model.generate(**inputs, max_new_tokens=30)
|
||||
EXPECTED_DECODED_TEXT = ["\n012345Describe this video.\n<think>Got it, let's analyze the video. First, the scene is an indoor tennis court. There are two players: one in a white shirt"] # fmt: skip
|
||||
|
||||
self.assertEqual(
|
||||
processor.batch_decode(output, skip_special_tokens=True),
|
||||
EXPECTED_DECODED_TEXT,
|
||||
)
|
||||
|
||||
@slow
|
||||
def test_small_model_integration_test_expand(self):
|
||||
model = Glm4vForConditionalGeneration.from_pretrained(
|
||||
"THUDM/GLM-4.1V-9B-Thinking", dtype="auto", device_map="auto"
|
||||
)
|
||||
inputs = self.processor.apply_chat_template(
|
||||
self.message, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt"
|
||||
).to(torch_device)
|
||||
|
||||
# This model on the hub has `do_sample=True`.
|
||||
torch.manual_seed(42)
|
||||
|
||||
output = model.generate(**inputs, max_new_tokens=30, do_sample=False, num_beams=2, num_return_sequences=2)
|
||||
|
||||
EXPECTED_DECODED_TEXT = [
|
||||
"\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture doesn't look like a dog; it's actually a cat. Specifically",
|
||||
"\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture doesn't look like a dog; it's actually a cat, specifically"
|
||||
] # fmt: skip
|
||||
self.assertEqual(
|
||||
self.processor.batch_decode(output, skip_special_tokens=True),
|
||||
EXPECTED_DECODED_TEXT,
|
||||
)
|
||||
|
||||
@slow
|
||||
def test_small_model_integration_test_batch_wo_image(self):
|
||||
model = Glm4vForConditionalGeneration.from_pretrained(
|
||||
"THUDM/GLM-4.1V-9B-Thinking", dtype="auto", device_map="auto"
|
||||
)
|
||||
message_wo_image = [
|
||||
{"role": "user", "content": [{"type": "text", "text": "Who are you?"}]},
|
||||
]
|
||||
batched_messages = [self.message, message_wo_image]
|
||||
inputs = self.processor.apply_chat_template(
|
||||
batched_messages,
|
||||
tokenize=True,
|
||||
add_generation_prompt=True,
|
||||
return_dict=True,
|
||||
return_tensors="pt",
|
||||
padding=True,
|
||||
).to(torch_device)
|
||||
|
||||
# This model on the hub has `do_sample=True`.
|
||||
torch.manual_seed(42)
|
||||
|
||||
# it should not matter whether two images are the same size or not
|
||||
output = model.generate(**inputs, max_new_tokens=30)
|
||||
|
||||
EXPECTED_DECODED_TEXT = [
|
||||
"\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture doesn't look like a dog; it's actually a cat. Specifically",
|
||||
"\nWho are you?\n<think>Got it, let's look at the user's question: \"Who are you?\" This is a common question when someone is just starting a conversation"
|
||||
] # fmt: skip
|
||||
self.assertEqual(
|
||||
self.processor.batch_decode(output, skip_special_tokens=True),
|
||||
EXPECTED_DECODED_TEXT,
|
||||
)
|
||||
|
||||
@slow
|
||||
def test_small_model_integration_test_batch_different_resolutions(self):
|
||||
model = Glm4vForConditionalGeneration.from_pretrained(
|
||||
"THUDM/GLM-4.1V-9B-Thinking", dtype="auto", device_map="auto"
|
||||
)
|
||||
batched_messages = [self.message, self.message2]
|
||||
inputs = self.processor.apply_chat_template(
|
||||
batched_messages,
|
||||
tokenize=True,
|
||||
add_generation_prompt=True,
|
||||
return_dict=True,
|
||||
return_tensors="pt",
|
||||
padding=True,
|
||||
).to(torch_device)
|
||||
|
||||
# This model on the hub has `do_sample=True`.
|
||||
torch.manual_seed(42)
|
||||
|
||||
# it should not matter whether two images are the same size or not
|
||||
output = model.generate(**inputs, max_new_tokens=30)
|
||||
|
||||
EXPECTED_DECODED_TEXT = [
|
||||
"\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture doesn't look like a dog; it's actually a cat. Specifically",
|
||||
"\nWhat kind of dog is this?\n<think>Got it, let's look at the image. Wait, the animals here are cats, not dogs. The question is about a dog, but",
|
||||
] # fmt: skip
|
||||
self.assertEqual(
|
||||
self.processor.batch_decode(output, skip_special_tokens=True),
|
||||
EXPECTED_DECODED_TEXT,
|
||||
)
|
||||
|
||||
@slow
|
||||
@require_flash_attn
|
||||
@require_torch_gpu
|
||||
def test_small_model_integration_test_batch_flashatt2(self):
|
||||
model = Glm4vForConditionalGeneration.from_pretrained(
|
||||
"THUDM/GLM-4.1V-9B-Thinking",
|
||||
dtype=torch.bfloat16,
|
||||
attn_implementation="flash_attention_2",
|
||||
device_map="auto",
|
||||
)
|
||||
batched_messages = [self.message, self.message2]
|
||||
inputs = self.processor.apply_chat_template(
|
||||
batched_messages,
|
||||
tokenize=True,
|
||||
add_generation_prompt=True,
|
||||
return_dict=True,
|
||||
return_tensors="pt",
|
||||
padding=True,
|
||||
).to(torch_device)
|
||||
|
||||
# This model on the hub has `do_sample=True`.
|
||||
torch.manual_seed(42)
|
||||
|
||||
# it should not matter whether two images are the same size or not
|
||||
output = model.generate(**inputs, max_new_tokens=30)
|
||||
|
||||
EXPECTED_DECODED_TEXT = [
|
||||
"\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture doesn't look like a dog. Wait, it's a cat,",
|
||||
"\nWhat kind of dog is this?\n<think>Got it, let's look at the image. Wait, the animals here are cats, not dogs. The question is about a dog, but"
|
||||
] # fmt: skip
|
||||
self.assertEqual(
|
||||
self.processor.batch_decode(output, skip_special_tokens=True),
|
||||
EXPECTED_DECODED_TEXT,
|
||||
)
|
||||
|
||||
@slow
|
||||
@require_flash_attn
|
||||
@require_torch_gpu
|
||||
def test_small_model_integration_test_batch_wo_image_flashatt2(self):
|
||||
model = Glm4vForConditionalGeneration.from_pretrained(
|
||||
"THUDM/GLM-4.1V-9B-Thinking",
|
||||
dtype=torch.bfloat16,
|
||||
attn_implementation="flash_attention_2",
|
||||
device_map="auto",
|
||||
)
|
||||
message_wo_image = [
|
||||
{"role": "user", "content": [{"type": "text", "text": "Who are you?"}]},
|
||||
]
|
||||
batched_messages = [self.message, message_wo_image]
|
||||
inputs = self.processor.apply_chat_template(
|
||||
batched_messages,
|
||||
tokenize=True,
|
||||
add_generation_prompt=True,
|
||||
return_dict=True,
|
||||
return_tensors="pt",
|
||||
padding=True,
|
||||
).to(torch_device)
|
||||
|
||||
# This model on the hub has `do_sample=True`.
|
||||
torch.manual_seed(42)
|
||||
|
||||
# it should not matter whether two images are the same size or not
|
||||
output = model.generate(**inputs, max_new_tokens=30)
|
||||
|
||||
EXPECTED_DECODED_TEXT = [
|
||||
"\nWhat kind of dog is this?\n<think>Got it, let's look at the image. The animal in the picture doesn't look like a dog; it's actually a cat. Specifically",
|
||||
"\nWho are you?\n<think>Got it, let's look at the user's question: \"Who are you?\" This is a common question when someone is just starting a conversation"
|
||||
] # fmt: skip
|
||||
|
||||
self.assertEqual(
|
||||
self.processor.batch_decode(output, skip_special_tokens=True),
|
||||
EXPECTED_DECODED_TEXT,
|
||||
)
|
||||
transformers/tests/models/glm4v/test_processor_glm4v.py (new file, 279 lines)
@@ -0,0 +1,279 @@
|
||||
# Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import inspect
|
||||
import shutil
|
||||
import tempfile
|
||||
import unittest
|
||||
|
||||
import numpy as np
|
||||
|
||||
from transformers import AutoProcessor
|
||||
from transformers.testing_utils import require_av, require_torch, require_vision
|
||||
from transformers.utils import is_torch_available, is_vision_available
|
||||
|
||||
from ...test_processing_common import ProcessorTesterMixin, url_to_local_path
|
||||
|
||||
|
||||
if is_vision_available():
|
||||
from transformers import Glm4vProcessor
|
||||
|
||||
if is_torch_available():
|
||||
import torch
|
||||
|
||||
|
||||
@require_vision
|
||||
@require_torch
|
||||
class Glm4vProcessorTest(ProcessorTesterMixin, unittest.TestCase):
|
||||
processor_class = Glm4vProcessor
|
||||
|
||||
@classmethod
|
||||
def setUpClass(cls):
|
||||
cls.tmpdirname = tempfile.mkdtemp()
|
||||
processor = Glm4vProcessor.from_pretrained(
|
||||
"THUDM/GLM-4.1V-9B-Thinking", patch_size=4, size={"shortest_edge": 12 * 12, "longest_edge": 18 * 18}
|
||||
)
|
||||
processor.save_pretrained(cls.tmpdirname)
|
||||
cls.image_token = processor.image_token
|
||||
|
||||
def get_tokenizer(self, **kwargs):
|
||||
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer
|
||||
|
||||
def get_image_processor(self, **kwargs):
|
||||
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor
|
||||
|
||||
def get_video_processor(self, **kwargs):
|
||||
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).video_processor
|
||||
|
||||
def get_processor(self, **kwargs):
|
||||
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs)
|
||||
|
||||
@classmethod
|
||||
def tearDownClass(cls):
|
||||
shutil.rmtree(cls.tmpdirname, ignore_errors=True)
|
||||
|
||||
@require_torch
|
||||
@require_av
|
||||
def _test_apply_chat_template(
|
||||
self,
|
||||
modality: str,
|
||||
batch_size: int,
|
||||
return_tensors: str,
|
||||
input_name: str,
|
||||
processor_name: str,
|
||||
input_data: list[str],
|
||||
):
|
||||
processor = self.get_processor()
|
||||
if processor.chat_template is None:
|
||||
self.skipTest("Processor has no chat template")
|
||||
|
||||
if processor_name not in self.processor_class.attributes:
|
||||
self.skipTest(f"{processor_name} attribute not present in {self.processor_class}")
|
||||
|
||||
batch_messages = [
|
||||
[
|
||||
{
|
||||
"role": "user",
|
||||
"content": [{"type": "text", "text": "Describe this."}],
|
||||
},
|
||||
]
|
||||
] * batch_size
|
||||
|
||||
# Test that jinja can be applied
|
||||
formatted_prompt = processor.apply_chat_template(batch_messages, add_generation_prompt=True, tokenize=False)
|
||||
self.assertEqual(len(formatted_prompt), batch_size)
|
||||
|
||||
# Test that tokenizing with template and directly with `self.tokenizer` gives same output
|
||||
formatted_prompt_tokenized = processor.apply_chat_template(
|
||||
batch_messages, add_generation_prompt=True, tokenize=True, return_tensors=return_tensors
|
||||
)
|
||||
add_special_tokens = True
|
||||
if processor.tokenizer.bos_token is not None and formatted_prompt[0].startswith(processor.tokenizer.bos_token):
|
||||
add_special_tokens = False
|
||||
tok_output = processor.tokenizer(
|
||||
formatted_prompt, return_tensors=return_tensors, add_special_tokens=add_special_tokens
|
||||
)
|
||||
expected_output = tok_output.input_ids
|
||||
self.assertListEqual(expected_output.tolist(), formatted_prompt_tokenized.tolist())
|
||||
|
||||
# Test that kwargs passed to processor's `__call__` are actually used
|
||||
tokenized_prompt_100 = processor.apply_chat_template(
|
||||
batch_messages,
|
||||
add_generation_prompt=True,
|
||||
tokenize=True,
|
||||
padding="max_length",
|
||||
truncation=True,
|
||||
return_tensors=return_tensors,
|
||||
max_length=100,
|
||||
)
|
||||
self.assertEqual(len(tokenized_prompt_100[0]), 100)
|
||||
|
||||
# Test that `return_dict=True` returns text related inputs in the dict
|
||||
out_dict_text = processor.apply_chat_template(
|
||||
batch_messages,
|
||||
add_generation_prompt=True,
|
||||
tokenize=True,
|
||||
return_dict=True,
|
||||
return_tensors=return_tensors,
|
||||
)
|
||||
self.assertTrue(all(key in out_dict_text for key in ["input_ids", "attention_mask"]))
|
||||
self.assertEqual(len(out_dict_text["input_ids"]), batch_size)
|
||||
self.assertEqual(len(out_dict_text["attention_mask"]), batch_size)
|
||||
|
||||
# Test that with modality URLs and `return_dict=True`, we get modality inputs in the dict
|
||||
for idx, url in enumerate(input_data[:batch_size]):
|
||||
batch_messages[idx][0]["content"] = [batch_messages[idx][0]["content"][0], {"type": modality, "url": url}]
|
||||
|
||||
out_dict = processor.apply_chat_template(
|
||||
batch_messages,
|
||||
add_generation_prompt=True,
|
||||
tokenize=True,
|
||||
return_dict=True,
|
||||
return_tensors=return_tensors,
|
||||
fps=2
|
||||
if isinstance(input_data[0], str)
|
||||
else None, # by default no more than 2 frames per second, otherwise too slow
|
||||
do_sample_frames=bool(isinstance(input_data[0], str)), # don't sample frames if decoded video is used
|
||||
)
|
||||
input_name = getattr(self, input_name)
|
||||
self.assertTrue(input_name in out_dict)
|
||||
self.assertEqual(len(out_dict["input_ids"]), batch_size)
|
||||
self.assertEqual(len(out_dict["attention_mask"]), batch_size)
|
||||
|
||||
if modality == "video":
|
||||
# GLM4V pixels don't scale with batch size the same way as in other models; compute the expected video token count from video_grid_thw
|
||||
expected_video_token_count = 0
|
||||
for thw in out_dict["video_grid_thw"]:
|
||||
expected_video_token_count += thw[0] * thw[1] * thw[2]
|
||||
mm_len = expected_video_token_count
|
||||
else:
|
||||
mm_len = batch_size * 4
|
||||
self.assertEqual(len(out_dict[input_name]), mm_len)
|
||||
|
||||
return_tensor_to_type = {"pt": torch.Tensor, "np": np.ndarray, None: list}
|
||||
for k in out_dict:
|
||||
self.assertIsInstance(out_dict[k], return_tensor_to_type[return_tensors])
|
||||
|
||||
@require_av
|
||||
def test_apply_chat_template_video_frame_sampling(self):
|
||||
processor = self.get_processor()
|
||||
if processor.chat_template is None:
|
||||
self.skipTest("Processor has no chat template")
|
||||
|
||||
signature = inspect.signature(processor.__call__)
|
||||
if "videos" not in {*signature.parameters.keys()} or (
|
||||
signature.parameters.get("videos") is not None
|
||||
and signature.parameters["videos"].annotation == inspect._empty
|
||||
):
|
||||
self.skipTest("Processor doesn't accept videos at input")
|
||||
|
||||
messages = [
|
||||
[
|
||||
{
|
||||
"role": "user",
|
||||
"content": [
|
||||
{"type": "video"},
|
||||
{"type": "text", "text": "What is shown in this video?"},
|
||||
],
|
||||
},
|
||||
]
|
||||
]
|
||||
|
||||
formatted_prompt = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
|
||||
self.assertEqual(len(formatted_prompt), 1)
|
||||
|
||||
formatted_prompt_tokenized = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True)
|
||||
expected_output = processor.tokenizer(formatted_prompt, return_tensors=None).input_ids
|
||||
self.assertListEqual(expected_output, formatted_prompt_tokenized)
|
||||
|
||||
out_dict = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True, return_dict=True)
|
||||
self.assertListEqual(list(out_dict.keys()), ["input_ids", "attention_mask"])
|
||||
|
||||
# Add video URL for return dict and load with `num_frames` arg
|
||||
messages[0][0]["content"][0] = {
|
||||
"type": "video",
|
||||
"url": url_to_local_path(
|
||||
"https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/tiny_video.mp4"
|
||||
),
|
||||
}
|
||||
|
||||
# Load with `video_fps` arg
|
||||
video_fps = 10
|
||||
out_dict_with_video = processor.apply_chat_template(
|
||||
messages,
|
||||
add_generation_prompt=True,
|
||||
tokenize=True,
|
||||
return_dict=True,
|
||||
video_fps=video_fps,
|
||||
)
|
||||
self.assertTrue(self.videos_input_name in out_dict_with_video)
|
||||
self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 8)
|
||||
|
||||
# Load the whole video
|
||||
out_dict_with_video = processor.apply_chat_template(
|
||||
messages,
|
||||
add_generation_prompt=True,
|
||||
tokenize=True,
|
||||
return_dict=True,
|
||||
do_sample_frames=False,
|
||||
)
|
||||
self.assertTrue(self.videos_input_name in out_dict_with_video)
|
||||
self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 24)
|
||||
|
||||
# Load video as a list of frames (i.e. images). NOTE: each frame should have same size
|
||||
# because we assume they come from one video
|
||||
messages[0][0]["content"][0] = {
|
||||
"type": "video",
|
||||
"url": [
|
||||
url_to_local_path(
|
||||
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
|
||||
),
|
||||
url_to_local_path(
|
||||
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
|
||||
),
|
||||
],
|
||||
}
|
||||
out_dict_with_video = processor.apply_chat_template(
|
||||
messages,
|
||||
add_generation_prompt=True,
|
||||
tokenize=True,
|
||||
return_dict=True,
|
||||
do_sample_frames=False,
|
||||
)
|
||||
self.assertTrue(self.videos_input_name in out_dict_with_video)
|
||||
self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 4)
|
||||
|
||||
# When the inputs are frame URLs/paths we expect that those are already
# sampled and will raise an error if asked to sample again.
|
||||
with self.assertRaisesRegex(
|
||||
ValueError, "Sampling frames from a list of images is not supported! Set `do_sample_frames=False`"
|
||||
):
|
||||
out_dict_with_video = processor.apply_chat_template(
|
||||
messages,
|
||||
add_generation_prompt=True,
|
||||
tokenize=True,
|
||||
return_dict=True,
|
||||
do_sample_frames=True,
|
||||
)
|
||||
|
||||
def test_model_input_names(self):
|
||||
processor = self.get_processor()
|
||||
|
||||
text = self.prepare_text_inputs(modalities=["image", "video"])
|
||||
image_input = self.prepare_image_inputs()
|
||||
video_inputs = self.prepare_video_inputs()
|
||||
inputs_dict = {"text": text, "images": image_input, "videos": video_inputs}
|
||||
inputs = processor(**inputs_dict, return_tensors="pt", do_sample_frames=False)
|
||||
|
||||
self.assertSetEqual(set(inputs.keys()), set(processor.model_input_names))
|
||||
transformers/tests/models/glm4v/test_video_processing_glm4v.py (new file, 334 lines)
@@ -0,0 +1,334 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2025 HuggingFace Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import unittest
|
||||
|
||||
import numpy as np
|
||||
|
||||
from transformers.image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
|
||||
from transformers.testing_utils import require_torch, require_vision
|
||||
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available
|
||||
|
||||
from ...test_video_processing_common import VideoProcessingTestMixin, prepare_video_inputs
|
||||
|
||||
|
||||
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    if is_torchvision_available():
|
||||
from transformers import Glm4vVideoProcessor
|
||||
from transformers.models.glm4v.video_processing_glm4v import smart_resize
|
||||
|
||||
|
||||
class Glm4vVideoProcessingTester:
|
||||
def __init__(
|
||||
self,
|
||||
parent,
|
||||
batch_size=5,
|
||||
num_frames=8,
|
||||
num_channels=3,
|
||||
min_resolution=30,
|
||||
max_resolution=80,
|
||||
temporal_patch_size=2,
|
||||
patch_size=14,
|
||||
merge_size=2,
|
||||
do_resize=True,
|
||||
size=None,
|
||||
do_normalize=True,
|
||||
image_mean=IMAGENET_STANDARD_MEAN,
|
||||
image_std=IMAGENET_STANDARD_STD,
|
||||
do_convert_rgb=True,
|
||||
):
|
||||
size = size if size is not None else {"longest_edge": 20, "shortest_edge": 10}
|
||||
self.parent = parent
|
||||
self.batch_size = batch_size
|
||||
self.num_frames = num_frames
|
||||
self.num_channels = num_channels
|
||||
self.min_resolution = min_resolution
|
||||
self.max_resolution = max_resolution
|
||||
self.do_resize = do_resize
|
||||
self.size = size
|
||||
self.do_normalize = do_normalize
|
||||
self.image_mean = image_mean
|
||||
self.image_std = image_std
|
||||
self.do_convert_rgb = do_convert_rgb
|
||||
self.temporal_patch_size = temporal_patch_size
|
||||
self.patch_size = patch_size
|
||||
self.merge_size = merge_size
|
||||
|
||||
def prepare_video_processor_dict(self):
|
||||
return {
|
||||
"do_resize": self.do_resize,
|
||||
"size": self.size,
|
||||
"do_normalize": self.do_normalize,
|
||||
"image_mean": self.image_mean,
|
||||
"image_std": self.image_std,
|
||||
"do_convert_rgb": self.do_convert_rgb,
|
||||
"do_sample_frames": True,
|
||||
}
|
||||
|
||||
def prepare_video_metadata(self, videos):
|
||||
video_metadata = []
|
||||
for video in videos:
|
||||
if isinstance(video, list):
|
||||
num_frames = len(video)
|
||||
elif hasattr(video, "shape"):
|
||||
if len(video.shape) == 4: # (T, H, W, C)
|
||||
num_frames = video.shape[0]
|
||||
else:
|
||||
num_frames = 1
|
||||
else:
|
||||
num_frames = self.num_frames
|
||||
|
||||
metadata = {
|
||||
"fps": 2,
|
||||
"duration": num_frames / 2,
|
||||
"total_num_frames": num_frames,
|
||||
}
|
||||
video_metadata.append(metadata)
|
||||
return video_metadata
|
||||
|
||||
def expected_output_video_shape(self, videos):
|
||||
grid_t = self.num_frames // self.temporal_patch_size
|
||||
hidden_dim = self.num_channels * self.temporal_patch_size * self.patch_size * self.patch_size
|
||||
seq_len = 0
|
||||
for video in videos:
|
||||
if isinstance(video, list) and isinstance(video[0], Image.Image):
|
||||
video = np.stack([np.array(frame) for frame in video])
|
||||
elif hasattr(video, "shape"):
|
||||
pass
|
||||
else:
|
||||
video = np.array(video)
|
||||
|
||||
if hasattr(video, "shape") and len(video.shape) >= 3:
|
||||
if len(video.shape) == 4:
|
||||
t, height, width = video.shape[:3]
|
||||
elif len(video.shape) == 3:
|
||||
height, width = video.shape[:2]
|
||||
t = 1
|
||||
else:
|
||||
t, height, width = self.num_frames, self.min_resolution, self.min_resolution
|
||||
else:
|
||||
t, height, width = self.num_frames, self.min_resolution, self.min_resolution
|
||||
|
||||
resized_height, resized_width = smart_resize(
|
||||
t,
|
||||
height,
|
||||
width,
|
||||
factor=self.patch_size * self.merge_size,
|
||||
min_pixels=self.size["shortest_edge"],
|
||||
max_pixels=self.size["longest_edge"],
|
||||
)
|
||||
grid_h, grid_w = resized_height // self.patch_size, resized_width // self.patch_size
|
||||
seq_len += grid_t * grid_h * grid_w
|
||||
return [seq_len, hidden_dim]
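# Illustrative arithmetic with the defaults above: grid_t = 8 // 2 = 4 temporal patches
# per video and hidden_dim = 3 * 2 * 14 * 14 = 1176, so each video contributes
# 4 * grid_h * grid_w rows to the flat [seq_len, 1176] pixel_values_videos output.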
|
||||
|
||||
def prepare_video_inputs(self, equal_resolution=False, return_tensors="pil"):
|
||||
videos = prepare_video_inputs(
|
||||
batch_size=self.batch_size,
|
||||
num_frames=self.num_frames,
|
||||
num_channels=self.num_channels,
|
||||
min_resolution=self.min_resolution,
|
||||
max_resolution=self.max_resolution,
|
||||
equal_resolution=equal_resolution,
|
||||
return_tensors=return_tensors,
|
||||
)
|
||||
return videos
|
||||
|
||||
|
||||
@require_torch
|
||||
@require_vision
|
||||
class Glm4vVideoProcessingTest(VideoProcessingTestMixin, unittest.TestCase):
|
||||
fast_video_processing_class = Glm4vVideoProcessor if is_torchvision_available() else None
|
||||
input_name = "pixel_values_videos"
|
||||
|
||||
def setUp(self):
|
||||
super().setUp()
|
||||
self.video_processor_tester = Glm4vVideoProcessingTester(self)
|
||||
|
||||
@property
|
||||
def video_processor_dict(self):
|
||||
return self.video_processor_tester.prepare_video_processor_dict()
|
||||
|
||||
def test_video_processor_from_dict_with_kwargs(self):
|
||||
video_processor = self.fast_video_processing_class.from_dict(self.video_processor_dict)
|
||||
self.assertEqual(video_processor.size, {"longest_edge": 20, "shortest_edge": 10})
|
||||
|
||||
video_processor = self.fast_video_processing_class.from_dict(
|
||||
self.video_processor_dict, size={"longest_edge": 42, "shortest_edge": 42}
|
||||
)
|
||||
self.assertEqual(video_processor.size, {"longest_edge": 42, "shortest_edge": 42})
|
||||
|
||||
def test_call_pil(self):
|
||||
for video_processing_class in self.video_processor_list:
|
||||
video_processing = video_processing_class(**self.video_processor_dict)
|
||||
video_inputs = self.video_processor_tester.prepare_video_inputs(
|
||||
equal_resolution=False, return_tensors="pil"
|
||||
)
|
||||
|
||||
for video in video_inputs:
|
||||
self.assertIsInstance(video[0], Image.Image)
|
||||
|
||||
video_metadata = self.video_processor_tester.prepare_video_metadata(video_inputs)
|
||||
encoded_videos = video_processing(
|
||||
video_inputs[0], video_metadata=[video_metadata[0]], return_tensors="pt"
|
||||
)[self.input_name]
|
||||
expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
|
||||
self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)
|
||||
encoded_videos = video_processing(video_inputs, video_metadata=video_metadata, return_tensors="pt")[
|
||||
self.input_name
|
||||
]
|
||||
expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
|
||||
self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)
|
||||
|
||||
def test_call_numpy(self):
|
||||
for video_processing_class in self.video_processor_list:
|
||||
video_processing = video_processing_class(**self.video_processor_dict)
|
||||
video_inputs = self.video_processor_tester.prepare_video_inputs(
|
||||
equal_resolution=False, return_tensors="np"
|
||||
)
|
||||
|
||||
video_metadata = self.video_processor_tester.prepare_video_metadata(video_inputs)
|
||||
encoded_videos = video_processing(
|
||||
video_inputs[0], video_metadata=[video_metadata[0]], return_tensors="pt"
|
||||
)[self.input_name]
|
||||
expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
|
||||
self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)
|
||||
|
||||
encoded_videos = video_processing(video_inputs, video_metadata=video_metadata, return_tensors="pt")[
|
||||
self.input_name
|
||||
]
|
||||
expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
|
||||
self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)
|
||||
|
||||
def test_call_pytorch(self):
|
||||
for video_processing_class in self.video_processor_list:
|
||||
video_processing = video_processing_class(**self.video_processor_dict)
|
||||
video_inputs = self.video_processor_tester.prepare_video_inputs(
|
||||
equal_resolution=False, return_tensors="pt"
|
||||
)
|
||||
video_metadata = self.video_processor_tester.prepare_video_metadata(video_inputs)
|
||||
encoded_videos = video_processing(
|
||||
video_inputs[0], video_metadata=[video_metadata[0]], return_tensors="pt"
|
||||
)[self.input_name]
|
||||
expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
|
||||
self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)
|
||||
encoded_videos = video_processing(video_inputs, video_metadata=video_metadata, return_tensors="pt")[
|
||||
self.input_name
|
||||
]
|
||||
expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
|
||||
self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)
|
||||
|
||||
@unittest.skip("Skip for now, the test needs adjustment for GLM-4.1V")
|
||||
def test_call_numpy_4_channels(self):
|
||||
for video_processing_class in self.video_processor_list:
|
||||
# Test that can process videos which have an arbitrary number of channels
|
||||
# Initialize video_processing
|
||||
video_processor = video_processing_class(**self.video_processor_dict)
|
||||
|
||||
# create random numpy tensors
|
||||
self.video_processor_tester.num_channels = 4
|
||||
video_inputs = self.video_processor_tester.prepare_video_inputs(
|
||||
equal_resolution=False, return_tensors="np"
|
||||
)
|
||||
|
||||
# Test not batched input
|
||||
encoded_videos = video_processor(
|
||||
video_inputs[0],
|
||||
return_tensors="pt",
|
||||
input_data_format="channels_last",
|
||||
image_mean=0,
|
||||
image_std=1,
|
||||
)[self.input_name]
|
||||
expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
|
||||
self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)
|
||||
|
||||
# Test batched
|
||||
encoded_videos = video_processor(
|
||||
video_inputs,
|
||||
return_tensors="pt",
|
||||
input_data_format="channels_last",
|
||||
image_mean=0,
|
||||
image_std=1,
|
||||
)[self.input_name]
|
||||
expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
|
||||
self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)
|
||||
|
||||
def test_nested_input(self):
|
||||
"""Tests that the processor can work with nested list where each video is a list of arrays"""
|
||||
for video_processing_class in self.video_processor_list:
|
||||
video_processing = video_processing_class(**self.video_processor_dict)
|
||||
video_inputs = self.video_processor_tester.prepare_video_inputs(
|
||||
equal_resolution=False, return_tensors="np"
|
||||
)
|
||||
|
||||
video_inputs_nested = [list(video) for video in video_inputs]
|
||||
video_metadata = self.video_processor_tester.prepare_video_metadata(video_inputs)
|
||||
|
||||
# Test not batched input
|
||||
encoded_videos = video_processing(
|
||||
video_inputs_nested[0], video_metadata=[video_metadata[0]], return_tensors="pt"
|
||||
)[self.input_name]
|
||||
expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]])
|
||||
self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)
|
||||
|
||||
# Test batched
|
||||
encoded_videos = video_processing(video_inputs_nested, video_metadata=video_metadata, return_tensors="pt")[
|
||||
self.input_name
|
||||
]
|
||||
expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs)
|
||||
self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)
|
||||
|
||||
def test_call_sample_frames(self):
|
||||
for video_processing_class in self.video_processor_list:
|
||||
video_processor_dict = self.video_processor_dict.copy()
|
||||
video_processing = video_processing_class(**video_processor_dict)
|
||||
|
||||
prev_num_frames = self.video_processor_tester.num_frames
|
||||
self.video_processor_tester.num_frames = 8
|
||||
prev_min_resolution = getattr(self.video_processor_tester, "min_resolution", None)
|
||||
prev_max_resolution = getattr(self.video_processor_tester, "max_resolution", None)
|
||||
self.video_processor_tester.min_resolution = 56
|
||||
self.video_processor_tester.max_resolution = 112
|
||||
|
||||
video_inputs = self.video_processor_tester.prepare_video_inputs(
|
||||
equal_resolution=False,
|
||||
return_tensors="torch",
|
||||
)
|
||||
|
||||
metadata = [[{"total_num_frames": 8, "fps": 4}]]
|
||||
batched_metadata = metadata * len(video_inputs)
|
||||
|
||||
encoded_videos = video_processing(video_inputs[0], return_tensors="pt", video_metadata=metadata)[
|
||||
self.input_name
|
||||
]
|
||||
encoded_videos_batched = video_processing(
|
||||
video_inputs, return_tensors="pt", video_metadata=batched_metadata
|
||||
)[self.input_name]
|
||||
|
||||
self.assertIsNotNone(encoded_videos)
|
||||
self.assertIsNotNone(encoded_videos_batched)
|
||||
self.assertEqual(len(encoded_videos.shape), 2)
|
||||
self.assertEqual(len(encoded_videos_batched.shape), 2)
|
||||
|
||||
with self.assertRaises(ValueError):
|
||||
video_processing(video_inputs[0], return_tensors="pt")[self.input_name]
|
||||
|
||||
self.video_processor_tester.num_frames = prev_num_frames
|
||||
if prev_min_resolution is not None:
|
||||
self.video_processor_tester.min_resolution = prev_min_resolution
|
||||
if prev_max_resolution is not None:
|
||||
self.video_processor_tester.max_resolution = prev_max_resolution
|
||||