init
@@ -0,0 +1,288 @@
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import itertools
import os
import random
import tempfile
import unittest
import unittest.mock

import numpy as np
from datasets import load_dataset

from transformers import Phi4MultimodalFeatureExtractor
from transformers.testing_utils import check_json_file_has_correct_format, require_torch
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_torch_available():
    import torch

global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a nested list of random floats with the given 2D shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
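
# Hypothetical usage sketch (not part of the original tests): floats_list((2, 3))
# returns a 2x3 nested Python list such as [[0.51, 0.04, 0.87], [0.22, 0.69, 0.13]],
# which the feature extractor accepts alongside numpy arrays and torch tensors.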


class Phi4MultimodalFeatureExtractionTester:
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=80,
        hop_length=160,
        win_length=400,
        padding_value=0.0,
        sampling_rate=16_000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
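        # With the defaults above, this spaces the batch lengths evenly: (2000 - 400) // 6 = 266,
        # giving sequence lengths 400, 666, 932, 1198, 1464, 1730, 1996.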
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.win_length = win_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "win_length": self.win_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


class Phi4MultimodalFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Phi4MultimodalFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = Phi4MultimodalFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_from_pretrained_kwargs(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(
                tmpdirname, feature_size=2 * self.feat_extract_dict["feature_size"]
            )

        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
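        # Doubling `feature_size` at load time should double the mel-bank axis of the
        # precomputed filters (axis 1 here), which is what the check below relies on.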
        self.assertTrue(2 * mel_1.shape[1] == mel_2.shape[1])

    def test_call(self):
        # Tests that all calls wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        pt_speech_inputs = [torch.tensor(speech_input) for speech_input in speech_inputs]

        # Test feature size
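        # The expected frame count follows the standard STFT framing rule:
        #   num_frames = (num_samples - win_length) // hop_length + 1
        # so the longest input here gives (1200 - 400) // 160 + 1 = 6 frames.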
        input_features = feature_extractor(np_speech_inputs, return_tensors="np").audio_input_features
        max_audio_len = (1200 - feature_extractor.win_length) // feature_extractor.hop_length + 1
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(input_features.shape[-2] == max_audio_len)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(pt_speech_inputs[0], return_tensors="np").audio_input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").audio_input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(pt_speech_inputs, return_tensors="np").audio_input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").audio_input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        pt_speech_inputs = torch.tensor(speech_inputs)
        encoded_sequences_1 = feature_extractor(pt_speech_inputs, return_tensors="np").audio_input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").audio_input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
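
        # Whether the inputs arrive as Python lists or float64 numpy arrays, padding
        # is expected to downcast to single precision in both frameworks.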
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"audio_input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.audio_input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"audio_input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.audio_input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id")[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    @require_torch
    def test_torch_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                6.5243, 7.2267, 8.0917, 8.0041, 6.8247, 6.3216, 5.9599, 5.6770,
                5.7441, 5.6138, 6.6793, 6.8597, 5.5375, 6.5330, 5.4880, 7.3280,
                9.0736, 9.7665, 9.8773, 10.0828, 10.0518, 10.1736, 10.0145, 9.2545,
                11.0495, 11.6518, 10.8654, 10.2293, 9.1045, 9.4819,
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = Phi4MultimodalFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").audio_input_features

        self.assertEqual(input_features.shape, (1, 584, 80))
        torch.testing.assert_close(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, rtol=1e-4, atol=1e-4)

    @unittest.mock.patch(
        "transformers.models.phi4_multimodal.feature_extraction_phi4_multimodal.is_torch_available", lambda: False
    )
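    # The patch above forces the extractor down its torch-free numpy path, so this
    # test exercises the fallback implementation against the same reference values.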
    def test_numpy_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = np.array(
            [
                6.5242944, 7.226712, 8.091721, 8.004097, 6.824679, 6.3216243,
                5.959894, 5.676975, 5.744051, 5.61384, 6.6793485, 6.8597484,
                5.5374746, 6.532976, 5.4879804, 7.3279905, 9.073576, 9.766463,
                9.877262, 10.082759, 10.051792, 10.173581, 10.0144825, 9.254548,
                11.049487, 11.651841, 10.865354, 10.229329, 9.104464, 9.481946,
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = Phi4MultimodalFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="np").audio_input_features
        self.assertEqual(input_features.shape, (1, 584, 80))
        self.assertTrue(np.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    @require_torch
    def test_torch_integration_batch(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                [
                    6.5243, 7.2267, 8.0917, 8.0041, 6.8247, 6.3216, 5.9599, 5.6770,
                    5.7441, 5.6138, 6.6793, 6.8597, 5.5375, 6.5330, 5.4880, 7.3280,
                    9.0736, 9.7665, 9.8773, 10.0828, 10.0518, 10.1736, 10.0145, 9.2545,
                    11.0495, 11.6518, 10.8654, 10.2293, 9.1045, 9.4819
                ],
                [
                    7.5105, 7.9453, 8.6161, 7.7666, 7.2572, 6.8823, 6.3242, 6.1899,
                    6.9706, 8.0810, 7.3227, 5.8580, 5.4990, 7.7373, 8.5447, 7.7203,
                    6.3230, 7.1995, 7.1463, 7.3153, 7.4054, 7.2855, 6.9396, 7.0255,
                    7.3285, 7.2748, 8.0742, 7.3998, 6.4813, 6.7509
                ],
                [
                    7.7932, 8.1604, 8.7653, 8.2080, 7.2630, 6.4537, 4.8394, 6.3153,
                    8.0207, 8.3379, 6.0896, 5.7369, 5.8601, 4.7598, 4.8850, 6.2529,
                    3.9354, 6.1577, 7.9921, 9.6577, 10.1449, 9.1414, 9.3361, 9.0022,
                    9.2533, 10.0548, 10.4372, 8.8550, 9.1266, 9.9013
                ]
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(3)
        feature_extractor = Phi4MultimodalFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").audio_input_features
        self.assertEqual(input_features.shape, (3, 1247, 80))
        torch.testing.assert_close(input_features[:, 0, :30], EXPECTED_INPUT_FEATURES, rtol=1e-4, atol=1e-4)
@@ -0,0 +1,309 @@
# coding=utf-8
# Copyright 2021 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import inspect
import math
import unittest
import warnings

import numpy as np
import pytest
from packaging import version

from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available

from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

if is_torchvision_available():
    from transformers import Phi4MultimodalImageProcessorFast


class Phi4MultimodalImageProcessingTester:
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=100,
        min_resolution=30,
        max_resolution=400,
        dynamic_hd=36,
        do_resize=True,
        size=None,
        patch_size=14,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_convert_rgb=True,
    ):
        super().__init__()
        size = size if size is not None else {"height": 100, "width": 100}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.dynamic_hd = dynamic_hd
        self.do_resize = do_resize
        self.size = size
        self.patch_size = patch_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "patch_size": self.patch_size,
            "dynamic_hd": self.dynamic_hd,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def expected_output_image_shape(self, images):
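        # The shape mirrors the dynamic-HD cropping scheme (a sketch of the reasoning):
        # each image yields ceil(W / crop_w) * ceil(H / crop_h) local crops plus one
        # global crop, capped at `dynamic_hd`; the batch pads to the largest crop count.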
        max_num_patches = 0
        for image in images:
            if isinstance(image, Image.Image):
                width, height = image.size
            elif isinstance(image, np.ndarray):
                height, width = image.shape[:2]
            elif isinstance(image, torch.Tensor):
                height, width = image.shape[-2:]
            w_crop_num = math.ceil(width / float(self.size["width"]))
            h_crop_num = math.ceil(height / float(self.size["height"]))
            num_patches = min(w_crop_num * h_crop_num + 1, self.dynamic_hd)
            max_num_patches = max(max_num_patches, num_patches)
        num_patches = max_num_patches
        return num_patches, self.num_channels, self.size["height"], self.size["width"]

    def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        return prepare_image_inputs(
            batch_size=self.batch_size,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            numpify=numpify,
            torchify=torchify,
        )


@require_torch
@require_vision
class Phi4MultimodalImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
    fast_image_processing_class = Phi4MultimodalImageProcessorFast if is_torchvision_available() else None
    test_slow_image_processor = False

    def setUp(self):
        super().setUp()
        self.image_processor_tester = Phi4MultimodalImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        for image_processing_class in self.image_processor_list:
            image_processing = image_processing_class(**self.image_processor_dict)
            self.assertTrue(hasattr(image_processing, "do_resize"))
            self.assertTrue(hasattr(image_processing, "size"))
            self.assertTrue(hasattr(image_processing, "do_center_crop"))
            self.assertTrue(hasattr(image_processing, "center_crop"))
            self.assertTrue(hasattr(image_processing, "do_normalize"))
            self.assertTrue(hasattr(image_processing, "image_mean"))
            self.assertTrue(hasattr(image_processing, "image_std"))
            self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        for image_processing_class in self.image_processor_list:
            image_processor = image_processing_class.from_dict(self.image_processor_dict)
            self.assertEqual(image_processor.size, {"height": 100, "width": 100})

            image_processor = image_processing_class.from_dict(self.image_processor_dict, size=42)
            self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    @unittest.skip(reason="Phi4MultimodalImageProcessorFast doesn't treat 4 channel PIL and numpy consistently yet")
    def test_call_numpy_4_channels(self):
        pass

    def test_cast_dtype_device(self):
        for image_processing_class in self.image_processor_list:
            if self.test_cast_dtype is not None:
                # Initialize image_processor
                image_processor = image_processing_class(**self.image_processor_dict)

                # create random PyTorch tensors
                image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)

                encoding = image_processor(image_inputs, return_tensors="pt")
                # for layoutLM compatibility
                self.assertEqual(encoding.image_pixel_values.device, torch.device("cpu"))
                self.assertEqual(encoding.image_pixel_values.dtype, torch.float32)

                encoding = image_processor(image_inputs, return_tensors="pt").to(torch.float16)
                self.assertEqual(encoding.image_pixel_values.device, torch.device("cpu"))
                self.assertEqual(encoding.image_pixel_values.dtype, torch.float16)

                encoding = image_processor(image_inputs, return_tensors="pt").to("cpu", torch.bfloat16)
                self.assertEqual(encoding.image_pixel_values.device, torch.device("cpu"))
                self.assertEqual(encoding.image_pixel_values.dtype, torch.bfloat16)

                with self.assertRaises(TypeError):
                    _ = image_processor(image_inputs, return_tensors="pt").to(torch.bfloat16, "cpu")

                # Try with text + image feature
                encoding = image_processor(image_inputs, return_tensors="pt")
                encoding.update({"input_ids": torch.LongTensor([[1, 2, 3], [4, 5, 6]])})
                encoding = encoding.to(torch.float16)

                self.assertEqual(encoding.image_pixel_values.device, torch.device("cpu"))
                self.assertEqual(encoding.image_pixel_values.dtype, torch.float16)
                self.assertEqual(encoding.input_ids.dtype, torch.long)

    def test_call_pil(self):
        for image_processing_class in self.image_processor_list:
            # Initialize image_processing
            image_processing = image_processing_class(**self.image_processor_dict)
            # create random PIL images
            image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False)
            for image in image_inputs:
                self.assertIsInstance(image, Image.Image)

            # Test not batched input
            encoded_images = image_processing(image_inputs[0], return_tensors="pt").image_pixel_values
            expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]])
            self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape))

            # Test batched
            encoded_images = image_processing(image_inputs, return_tensors="pt").image_pixel_values
            expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs)
            self.assertEqual(
                tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape)
            )

    def test_call_numpy(self):
        for image_processing_class in self.image_processor_list:
            # Initialize image_processing
            image_processing = image_processing_class(**self.image_processor_dict)
            # create random numpy tensors
            image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True)
            for image in image_inputs:
                self.assertIsInstance(image, np.ndarray)

            # Test not batched input
            encoded_images = image_processing(image_inputs[0], return_tensors="pt").image_pixel_values
            expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]])
            self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape))

            # Test batched
            encoded_images = image_processing(image_inputs, return_tensors="pt").image_pixel_values
            expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs)
            self.assertEqual(
                tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape)
            )

    def test_call_pytorch(self):
        for image_processing_class in self.image_processor_list:
            # Initialize image_processing
            image_processing = image_processing_class(**self.image_processor_dict)
            # create random PyTorch tensors
            image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)

            for image in image_inputs:
                self.assertIsInstance(image, torch.Tensor)

            # Test not batched input
            encoded_images = image_processing(image_inputs[0], return_tensors="pt").image_pixel_values
            expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]])
            self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape))

            # Test batched
            expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs)
            encoded_images = image_processing(image_inputs, return_tensors="pt").image_pixel_values
            self.assertEqual(
                tuple(encoded_images.shape),
                (self.image_processor_tester.batch_size, *expected_output_image_shape),
            )

    def test_image_processor_preprocess_arguments(self):
        is_tested = False

        for image_processing_class in self.image_processor_list:
            image_processor = image_processing_class(**self.image_processor_dict)

            # validation done by _valid_processor_keys attribute
            if hasattr(image_processor, "_valid_processor_keys") and hasattr(image_processor, "preprocess"):
                preprocess_parameter_names = inspect.getfullargspec(image_processor.preprocess).args
                preprocess_parameter_names.remove("self")
                preprocess_parameter_names.sort()
                valid_processor_keys = image_processor._valid_processor_keys
                valid_processor_keys.sort()
                self.assertEqual(preprocess_parameter_names, valid_processor_keys)
                is_tested = True

            # validation done by @filter_out_non_signature_kwargs decorator
            if hasattr(image_processor.preprocess, "_filter_out_non_signature_kwargs"):
                if hasattr(self.image_processor_tester, "prepare_image_inputs"):
                    inputs = self.image_processor_tester.prepare_image_inputs()
                elif hasattr(self.image_processor_tester, "prepare_video_inputs"):
                    inputs = self.image_processor_tester.prepare_video_inputs()
                else:
                    self.skipTest(reason="No valid input preparation method found")

                with warnings.catch_warnings(record=True) as raised_warnings:
                    warnings.simplefilter("always")
                    image_processor(inputs, extra_argument=True)

                messages = " ".join([str(w.message) for w in raised_warnings])
                self.assertGreaterEqual(len(raised_warnings), 1)
                self.assertIn("extra_argument", messages)
                is_tested = True

        if not is_tested:
            self.skipTest(reason="No validation found for `preprocess` method")

    @slow
    @pytest.mark.torch_compile_test
    def test_can_compile_fast_image_processor(self):
        if self.fast_image_processing_class is None:
            self.skipTest("Skipping compilation test as fast image processor is not defined")
        if version.parse(torch.__version__) < version.parse("2.3"):
            self.skipTest(reason="This test requires torch >= 2.3 to run.")

        torch.compiler.reset()
        input_image = torch.randint(0, 255, (3, 224, 224), dtype=torch.uint8)
        image_processor = self.fast_image_processing_class(**self.image_processor_dict)
        output_eager = image_processor(input_image, device=torch_device, return_tensors="pt")
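
        # Compilation should be numerically transparent: the compiled processor's
        # outputs are compared against the eager ones within a small tolerance below.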
        image_processor = torch.compile(image_processor, mode="reduce-overhead")
        output_compiled = image_processor(input_image, device=torch_device, return_tensors="pt")

        torch.testing.assert_close(
            output_eager.image_pixel_values, output_compiled.image_pixel_values, rtol=1e-4, atol=1e-4
        )
@@ -0,0 +1,397 @@
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import pytest
import requests
from parameterized import parameterized

from transformers import (
    AutoModelForCausalLM,
    AutoProcessor,
    GenerationConfig,
    Phi4MultimodalAudioConfig,
    Phi4MultimodalConfig,
    Phi4MultimodalForCausalLM,
    Phi4MultimodalModel,
    Phi4MultimodalVisionConfig,
    is_torch_available,
    is_vision_available,
)
from transformers.testing_utils import (
    Expectations,
    cleanup,
    require_torch,
    require_torch_large_accelerator,
    require_torchcodec,
    slow,
    torch_device,
)
from transformers.utils import is_torchcodec_available

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor


if is_torch_available():
    import torch


if is_vision_available():
    from PIL import Image


if is_torchcodec_available():
    import torchcodec


class Phi4MultimodalModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=12,
        image_seq_length=275,
        audio_seq_length=8,
        is_training=True,
        num_hidden_layers=2,
        vocab_size=49,
        hidden_size=32,
        intermediate_size=64,
        num_attention_heads=8,
        num_key_value_heads=4,
        bos_token_id=0,
        eos_token_id=0,
        pad_token_id=0,
        image_token_id=1,
        audio_token_id=2,
        image_size=16,
        audio_size=12,
        audio_config=Phi4MultimodalAudioConfig(
            num_blocks=2,
            hidden_size=32,
            num_attention_heads=8,
            intermediate_size=48,
            depthwise_separable_out_channel=128,
            nemo_conv_channels=128,
            initializer_range=1e-5,
        ),
        vision_config=Phi4MultimodalVisionConfig(
            num_hidden_layers=2,
            hidden_size=32,
            intermediate_size=64,
            num_attention_heads=8,
            crop_size=16,
            initializer_range=1e-5,
        ),
    ):
        self.parent = parent
        self.num_hidden_layers = num_hidden_layers
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        self.image_token_id = image_token_id
        self.audio_token_id = audio_token_id
        self.audio_config = audio_config
        self.vision_config = vision_config

        self.is_training = is_training
        self.batch_size = batch_size
        self.seq_length = seq_length + image_seq_length + audio_seq_length
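        # The effective sequence length is text + image placeholder + audio placeholder
        # tokens, matching the input layout built in prepare_config_and_inputs below.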
        self.image_seq_length = image_seq_length
        self.audio_seq_length = audio_seq_length
        self.image_size = image_size
        self.audio_size = audio_size
        self.num_channels = 3

    def get_config(self):
        return Phi4MultimodalConfig(
            num_hidden_layers=self.num_hidden_layers,
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            intermediate_size=self.intermediate_size,
            num_attention_heads=self.num_attention_heads,
            num_key_value_heads=self.num_key_value_heads,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            vision_config=self.vision_config,
            audio_config=self.audio_config,
        )

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # The shapes correspond to the inputs for an image of size 16x16
        image_pixel_values = floats_tensor([self.batch_size, 2, self.num_channels, self.image_size, self.image_size])
        image_attention_mask = torch.ones(self.batch_size, 2, 1, 1)
        image_sizes = torch.tensor(
            [[self.image_size, self.image_size]] * self.batch_size, dtype=torch.long, device=torch_device
        )

        # Feature sizes returned by an audio of size 10000
        audio_input_features = floats_tensor([self.batch_size, 61, 80])
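        # 61 frames matches the STFT framing rule under the feature extractor defaults
        # (assumed here): (10000 - 400) // 160 + 1 = 61, each frame with 80 mel bins.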
        audio_embed_sizes = torch.tensor([self.audio_seq_length] * self.batch_size, dtype=torch.long)

        input_ids[input_ids == self.pad_token_id] = self.pad_token_id + 1  # random value but not pad token
        input_ids[-1, 0] = self.pad_token_id  # pad out the first token of the last sequence
        input_ids[:, -self.image_seq_length - self.audio_seq_length : -self.audio_seq_length] = self.image_token_id
        input_ids[:, -self.audio_seq_length :] = self.audio_token_id
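        # Resulting row layout: [text tokens | image placeholder tokens | audio placeholder tokens]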

        attention_mask = torch.ones_like(input_ids)
        attention_mask[-1, 0] = 0  # mask the first token of the last sequence
        config = self.get_config()

        return (
            config,
            input_ids,
            attention_mask,
            image_pixel_values,
            image_attention_mask,
            image_sizes,
            audio_input_features,
            audio_embed_sizes,
        )

    def prepare_config_and_inputs_for_common(self):
        (
            config,
            input_ids,
            attention_mask,
            image_pixel_values,
            image_attention_mask,
            image_sizes,
            audio_input_features,
            audio_embed_sizes,
        ) = self.prepare_config_and_inputs()
        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "image_pixel_values": image_pixel_values,
            "image_attention_mask": image_attention_mask,
            "image_sizes": image_sizes,
            "audio_input_features": audio_input_features,
            "audio_embed_sizes": audio_embed_sizes,
        }
        return config, inputs_dict


@require_torch
class Phi4MultimodalModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
    """
    Model tester for `Phi4Multimodal`.
    """

    all_model_classes = (Phi4MultimodalForCausalLM, Phi4MultimodalModel) if is_torch_available() else ()
    test_pruning = False
    test_head_masking = False
    _is_composite = True

    def setUp(self):
        self.model_tester = Phi4MultimodalModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Phi4MultimodalConfig)

    @unittest.skip(reason="Unstable test")
    def test_initialization(self):
        pass

    @unittest.skip(reason="Depending on input modalities, some params may not have gradients")
    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Depending on input modalities, some params may not have gradients")
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(reason="Depending on input modalities, some params may not have gradients")
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    @unittest.skip(reason="Test tries to instantiate dynamic cache with an arg")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip(reason="Test is only for old attention format")
    def test_sdpa_can_dispatch_composite_models(self):
        pass

    @unittest.skip(reason="Static cache supported only for text-only inputs (not images or audio)")
    def test_generate_from_inputs_embeds_with_static_cache(self):
        pass

    @unittest.skip(reason="Static cache supported only for text-only inputs (not images or audio)")
    def test_generate_with_static_cache(self):
        pass

    @unittest.skip(
        reason="Supported only for text-only inputs (otherwise dynamic control flows for multimodal inputs)"
    )
    def test_generate_compilation_all_outputs(self):
        pass

    @unittest.skip(
        reason="Supported only for text-only inputs (otherwise dynamic control flows for multimodal inputs)"
    )
    @pytest.mark.torch_compile_test
    def test_generate_compile_model_forward_fullgraph(self):
        pass

    @parameterized.expand([("random",), ("same",)])
    @unittest.skip(reason="`image_attention_mask` has a specific shape")
    def test_assisted_decoding_matches_greedy_search(self, assistant_type):
        pass

    @unittest.skip(reason="`image_attention_mask` has a specific shape")
    def test_assisted_decoding_sample(self):
        pass

    @unittest.skip(reason="`image_attention_mask` has a specific shape")
    def test_prompt_lookup_decoding_matches_greedy_search(self):
        pass

    @unittest.skip(reason="Cannot unpad inputs for all modalities so easily")
    def test_flash_attention_2_padding_matches_padding_free_with_position_ids(self):
        pass

    @unittest.skip(reason="Dynamo error")
    def test_flex_attention_with_grads(self):
        pass


@require_torch
@slow
class Phi4MultimodalIntegrationTest(unittest.TestCase):
    checkpoint_path = "microsoft/Phi-4-multimodal-instruct"
    revision = "refs/pr/70"
    image_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
    audio_url = "https://huggingface.co/datasets/raushan-testing-hf/audio-test/resolve/main/f2641_0_throatclearing.wav"

    def setUp(self):
        # Currently, the Phi-4 checkpoint on the Hub does not work with the latest Phi-4 code, so the slow
        # integration tests won't pass without using the correct revision (refs/pr/70)
        self.processor = AutoProcessor.from_pretrained(self.checkpoint_path, revision=self.revision)
        self.generation_config = GenerationConfig(max_new_tokens=20, do_sample=False)
        self.user_token = "<|user|>"
        self.assistant_token = "<|assistant|>"
        self.end_token = "<|end|>"
        self.image = Image.open(requests.get(self.image_url, stream=True).raw)
        audio_bytes = requests.get(self.audio_url, stream=True).raw.data
        samples = torchcodec.decoders.AudioDecoder(audio_bytes).get_all_samples()
        self.audio, self.sampling_rate = samples.data, samples.sample_rate

        cleanup(torch_device, gc_collect=True)

    def tearDown(self):
        cleanup(torch_device, gc_collect=True)

    def test_text_only_generation(self):
        model = AutoModelForCausalLM.from_pretrained(
            self.checkpoint_path, revision=self.revision, dtype=torch.float16, device_map=torch_device
        )

        prompt = f"{self.user_token}What is the answer for 1+1? Explain it.{self.end_token}{self.assistant_token}"
        inputs = self.processor(prompt, images=None, return_tensors="pt").to(torch_device)

        output = model.generate(
            **inputs,
            generation_config=self.generation_config,
        )
        output = output[:, inputs["input_ids"].shape[1] :]
        response = self.processor.batch_decode(output, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]

        EXPECTED_RESPONSE = "The answer for 1+1 is 2. This is because when you add one to another"

        self.assertEqual(response, EXPECTED_RESPONSE)

    def test_vision_text_generation(self):
        model = AutoModelForCausalLM.from_pretrained(
            self.checkpoint_path, revision=self.revision, dtype=torch.float16, device_map=torch_device
        )

        prompt = f"{self.user_token}<|image|>What is shown in this image?{self.end_token}{self.assistant_token}"
        inputs = self.processor(prompt, images=self.image, return_tensors="pt").to(torch_device)

        output = model.generate(
            **inputs,
            generation_config=self.generation_config,
        )
        output = output[:, inputs["input_ids"].shape[1] :]
        response = self.processor.batch_decode(output, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]

        EXPECTED_RESPONSES = Expectations(
            {
                ("cuda", 7): 'The image shows a vibrant scene at a traditional Chinese-style street entrance, known as a "gate"',
                ("cuda", 8): 'The image shows a vibrant scene at a street intersection in a city with a Chinese-influenced architectural',
            }
        )  # fmt: skip
        EXPECTED_RESPONSE = EXPECTED_RESPONSES.get_expectation()

        self.assertEqual(response, EXPECTED_RESPONSE)

    @require_torch_large_accelerator
    def test_multi_image_vision_text_generation(self):
        model = AutoModelForCausalLM.from_pretrained(
            self.checkpoint_path, revision=self.revision, dtype=torch.float16, device_map=torch_device
        )

        images = []
        placeholder = ""
        for i in range(1, 5):
            url = f"https://image.slidesharecdn.com/azureintroduction-191206101932/75/Introduction-to-Microsoft-Azure-Cloud-{i}-2048.jpg"
            images.append(Image.open(requests.get(url, stream=True).raw))
            placeholder += "<|image|>"

        prompt = f"{self.user_token}{placeholder}Summarize the deck of slides.{self.end_token}{self.assistant_token}"
        inputs = self.processor(prompt, images, return_tensors="pt").to(torch_device)

        output = model.generate(
            **inputs,
            generation_config=self.generation_config,
        )
        output = output[:, inputs["input_ids"].shape[1] :]
        response = self.processor.batch_decode(output, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]

        EXPECTED_RESPONSE = "The presentation provides an overview of Microsoft Azure, a cloud computing platform by Microsoft, and its various services"

        self.assertEqual(response, EXPECTED_RESPONSE)

    @require_torchcodec
    def test_audio_text_generation(self):
        model = AutoModelForCausalLM.from_pretrained(
            self.checkpoint_path, revision=self.revision, dtype=torch.float16, device_map=torch_device
        )

        prompt = f"{self.user_token}<|audio|>What is happening in this audio?{self.end_token}{self.assistant_token}"
        inputs = self.processor(prompt, audio=self.audio, sampling_rate=self.sampling_rate, return_tensors="pt").to(
            torch_device
        )

        output = model.generate(
            **inputs,
            generation_config=self.generation_config,
        )
        output = output[:, inputs["input_ids"].shape[1] :]
        response = self.processor.batch_decode(output, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]

        # Yes, this really is the expected response, even though the model correctly processes the audio file
        EXPECTED_RESPONSE = "I'm sorry, but I can't listen to audio. However, if you describe the audio to me,"

        self.assertEqual(response, EXPECTED_RESPONSE)