2025-10-09 16:47:16 +08:00
parent c8feb4deb5
commit e27e3f16bb
5248 changed files with 1778505 additions and 0 deletions


@@ -0,0 +1,272 @@
# Copyright 2019 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import concurrent.futures
import json
import os
import shutil
import tempfile
import unittest
from transformers import AutoTokenizer, LlamaTokenizerFast, PreTrainedTokenizerFast
from transformers.testing_utils import require_tokenizers
from ..test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class PreTrainedTokenizationFastTest(TokenizerTesterMixin, unittest.TestCase):
rust_tokenizer_class = PreTrainedTokenizerFast
test_slow_tokenizer = False
test_rust_tokenizer = True
from_pretrained_vocab_key = "tokenizer_file"
@classmethod
def setUpClass(cls):
cls.test_rust_tokenizer = False # because we don't have pretrained_vocab_files_map
super().setUpClass()
cls.test_rust_tokenizer = True
model_paths = ["robot-test/dummy-tokenizer-fast", "robot-test/dummy-tokenizer-wordlevel"]
cls.bytelevel_bpe_model_name = "SaulLu/dummy-tokenizer-bytelevel-bpe"
# Inclusion of 2 tokenizers to test different types of models (Unigram and WordLevel for the moment)
cls.tokenizers_list = [(PreTrainedTokenizerFast, model_path, {}) for model_path in model_paths]
tokenizer = PreTrainedTokenizerFast.from_pretrained(model_paths[0])
tokenizer.save_pretrained(cls.tmpdirname)
@unittest.skip(
"We disable this test for PreTrainedTokenizerFast because it is the only tokenizer that is not linked to any model"
)
def test_tokenizer_mismatch_warning(self):
pass
@unittest.skip(
"We disable this test for PreTrainedTokenizerFast because it is the only tokenizer that is not linked to any model"
)
def test_encode_decode_with_spaces(self):
pass
@unittest.skip(
"We disable this test for PreTrainedTokenizerFast because it is the only tokenizer that is not linked to any model"
)
def test_added_tokens_serialization(self):
pass
@unittest.skip(
"We disable this test for PreTrainedTokenizerFast because it is the only tokenizer that is not linked to any model"
)
def test_additional_special_tokens_serialization(self):
pass
@unittest.skip(reason="PreTrainedTokenizerFast is the only tokenizer that is not linked to any model")
def test_prepare_for_model(self):
pass
@unittest.skip(reason="PreTrainedTokenizerFast doesn't have tokenizer_file in its signature")
def test_rust_tokenizer_signature(self):
pass
def test_training_new_tokenizer(self):
tmpdirname_orig = self.tmpdirname
# Here we want to test the 2 available tokenizers that use 2 different types of models: Unigram and WordLevel.
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
try:
self.tmpdirname = tempfile.mkdtemp()
tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer.save_pretrained(self.tmpdirname)
super().test_training_new_tokenizer()
finally:
# Even if the test fails, we must be sure that the folder is deleted and that the default tokenizer
# is restored
shutil.rmtree(self.tmpdirname)
self.tmpdirname = tmpdirname_orig
def test_training_new_tokenizer_with_special_tokens_change(self):
tmpdirname_orig = self.tmpdirname
# Here we want to test the 2 available tokenizers that use 2 different types of models: Unigram and WordLevel.
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
try:
self.tmpdirname = tempfile.mkdtemp()
tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer.save_pretrained(self.tmpdirname)
super().test_training_new_tokenizer_with_special_tokens_change()
finally:
# Even if the test fails, we must be sure that the folder is deleted and that the default tokenizer
# is restored
shutil.rmtree(self.tmpdirname)
self.tmpdirname = tmpdirname_orig
def test_training_new_tokenizer_with_bytelevel(self):
tokenizer = self.rust_tokenizer_class.from_pretrained(self.bytelevel_bpe_model_name)
toy_text_iterator = ("a" for _ in range(1000))
new_tokenizer = tokenizer.train_new_from_iterator(text_iterator=toy_text_iterator, length=1000, vocab_size=50)
encoding_ids = new_tokenizer.encode("a🤗")
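# The emoji is not in the tiny trained vocab, so the byte-level model decomposes it into its four UTF-8 bytes, giving 5 ids in total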
self.assertEqual(encoding_ids, [64, 172, 253, 97, 245])
def test_init_from_tokenizers_model(self):
from tokenizers import Tokenizer
sentences = ["Hello, y'all!", "How are you 😁 ? There should not be any issue right?"]
tokenizer = Tokenizer.from_pretrained("google-t5/t5-base")
# Enable padding
tokenizer.enable_padding(pad_id=0, pad_token="<pad>", length=512, pad_to_multiple_of=8)
self.assertEqual(
tokenizer.padding,
{
"length": 512,
"pad_to_multiple_of": 8,
"pad_id": 0,
"pad_token": "<pad>",
"pad_type_id": 0,
"direction": "right",
},
)
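# The padding configuration set on the raw tokenizers.Tokenizer should carry over to the wrapped fast tokenizer, including after save/reload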
fast_tokenizer = PreTrainedTokenizerFast(tokenizer_object=tokenizer)
tmpdirname = tempfile.mkdtemp()
fast_tokenizer.save_pretrained(tmpdirname)
fast_from_saved = PreTrainedTokenizerFast.from_pretrained(tmpdirname)
for tok in [fast_tokenizer, fast_from_saved]:
self.assertEqual(tok.pad_token_id, 0)
self.assertEqual(tok.padding_side, "right")
self.assertEqual(tok.pad_token, "<pad>")
self.assertEqual(tok.init_kwargs["max_length"], 512)
self.assertEqual(tok.init_kwargs["pad_to_multiple_of"], 8)
self.assertEqual(tok(sentences, padding = True), {'input_ids': [[8774, 6, 3, 63, 31, 1748, 55, 1, 0, 0, 0, 0,0, 0, 0, 0],[ 571, 33, 25, 3, 2, 3, 58, 290, 225, 59, 36, 136, 962, 269, 58, 1]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}) # fmt: skip
tokenizer.enable_truncation(8, stride=0, strategy="longest_first", direction="right")
self.assertEqual(
tokenizer.truncation, {"max_length": 8, "stride": 0, "strategy": "longest_first", "direction": "right"}
)
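# As with padding, the truncation settings should carry over to the fast tokenizer and survive save/reload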
fast_tokenizer = PreTrainedTokenizerFast(tokenizer_object=tokenizer)
tmpdirname = tempfile.mkdtemp()
fast_tokenizer.save_pretrained(tmpdirname)
fast_from_saved = PreTrainedTokenizerFast.from_pretrained(tmpdirname)
for tok in [fast_tokenizer, fast_from_saved]:
self.assertEqual(tok.truncation_side, "right")
self.assertEqual(tok.init_kwargs["truncation_strategy"], "longest_first")
self.assertEqual(tok.init_kwargs["max_length"], 8)
self.assertEqual(tok.init_kwargs["stride"], 0)
# NOTE: even if the model has a default max_length, it is not used here,
# so tok(sentences, truncation=True) alone does nothing and does not warn either
self.assertEqual(tok(sentences, truncation = True, max_length = 8), {'input_ids': [[8774, 6, 3, 63, 31, 1748, 55, 1],[ 571, 33, 25, 3, 2, 3, 58, 1]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1]]}) # fmt: skip
def test_class_after_save_and_reload(self):
# Model contains a `LlamaTokenizerFast` tokenizer with no slow fallback
model_id = "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"
with tempfile.TemporaryDirectory() as temp_dir:
tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True)
self.assertTrue(
isinstance(tokenizer, LlamaTokenizerFast),
f"Expected tokenizer(use_fast=True) type: `LlamaTokenizerFast`, actual=`{type(tokenizer)}`",
)
# Fast tokenizer will ignore `use_fast=False`
tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=False)
self.assertTrue(
isinstance(tokenizer, LlamaTokenizerFast),
f"Expected tokenizer type(use_fast=False): `LlamaTokenizerFast`, actual=`{type(tokenizer)}`",
)
# Save tokenizer
tokenizer.save_pretrained(temp_dir)
tokenizer = AutoTokenizer.from_pretrained(temp_dir, use_fast=False)
# Verify post save and reload the fast tokenizer class did not change
self.assertTrue(
isinstance(tokenizer, LlamaTokenizerFast),
f"Expected tokenizer type: `LlamaTokenizerFast`, actual=`{type(tokenizer)}`",
)
tokenizer = AutoTokenizer.from_pretrained(temp_dir, use_fast=True)
# Verify post save and reload the fast tokenizer class did not change
self.assertTrue(
isinstance(tokenizer, LlamaTokenizerFast),
f"Expected tokenizer type: `LlamaTokenizerFast`, actual=`{type(tokenizer)}`",
)
@require_tokenizers
class TokenizerVersioningTest(unittest.TestCase):
def test_local_versioning(self):
tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-cased")
json_tokenizer = json.loads(tokenizer._tokenizer.to_str())
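# Add an extra "huggingface" entry to the serialized vocab so the versioned file can be told apart from the original one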
json_tokenizer["model"]["vocab"]["huggingface"] = len(tokenizer)
with tempfile.TemporaryDirectory() as tmp_dir:
# Hack to save this in the tokenizer_config.json
tokenizer.init_kwargs["fast_tokenizer_files"] = ["tokenizer.4.0.0.json"]
tokenizer.save_pretrained(tmp_dir)
json.dump(json_tokenizer, open(os.path.join(tmp_dir, "tokenizer.4.0.0.json"), "w"))
# This should pick the new tokenizer file as the version of Transformers is > 4.0.0
new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
self.assertEqual(len(new_tokenizer), len(tokenizer) + 1)
json_tokenizer = json.loads(new_tokenizer._tokenizer.to_str())
self.assertIn("huggingface", json_tokenizer["model"]["vocab"])
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old tokenizer file, as the installed version of Transformers is < 42.0.0
shutil.move(os.path.join(tmp_dir, "tokenizer.4.0.0.json"), os.path.join(tmp_dir, "tokenizer.42.0.0.json"))
tokenizer.init_kwargs["fast_tokenizer_files"] = ["tokenizer.42.0.0.json"]
tokenizer.save_pretrained(tmp_dir)
new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
self.assertEqual(len(new_tokenizer), len(tokenizer))
json_tokenizer = json.loads(new_tokenizer._tokenizer.to_str())
self.assertNotIn("huggingface", json_tokenizer["model"]["vocab"])
def test_repo_versioning(self):
# This repo has two tokenizer files, one for v4.0.0 and above with an added token, one for versions lower.
repo = "hf-internal-testing/test-two-tokenizers"
# This should pick the new tokenizer file as the version of Transformers is > 4.0.0
tokenizer = AutoTokenizer.from_pretrained(repo)
self.assertEqual(len(tokenizer), 28997)
json_tokenizer = json.loads(tokenizer._tokenizer.to_str())
self.assertIn("huggingface", json_tokenizer["model"]["vocab"])
# Test an older version by monkey-patching the version in the module where it is used.
import transformers as old_transformers
old_transformers.tokenization_utils_base.__version__ = "3.0.0"
old_tokenizer = old_transformers.models.auto.AutoTokenizer.from_pretrained(repo)
self.assertEqual(len(old_tokenizer), 28996)
json_tokenizer = json.loads(old_tokenizer._tokenizer.to_str())
self.assertNotIn("huggingface", json_tokenizer["model"]["vocab"])
@require_tokenizers
class ReduceMutableBorrowTests(unittest.TestCase):
def test_async_share_tokenizer(self):
# See https://github.com/huggingface/transformers/pull/12550
# and https://github.com/huggingface/tokenizers/issues/537
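# Sharing one fast tokenizer across threads while passing truncation/padding arguments used to fail with "Already borrowed" errors from the Rust backend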
tokenizer = PreTrainedTokenizerFast.from_pretrained("robot-test/dummy-tokenizer-wordlevel")
text = "The Matrix is a 1999 science fiction action film."
with concurrent.futures.ThreadPoolExecutor() as executor:
futures = [executor.submit(self.fetch, tokenizer, text) for i in range(10)]
return_value = [future.result() for future in futures]
self.assertEqual(return_value, [[1, 10, 0, 8, 0, 18, 0, 0, 0, 2] for i in range(10)])
def fetch(self, tokenizer, text):
return tokenizer.encode(text, truncation="longest_first", padding="longest")


@@ -0,0 +1,406 @@
# Copyright 2018 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
ruff: isort: skip_file
"""
import os
import pickle
import tempfile
import unittest
from typing import Callable, Optional
import numpy as np
from transformers import (
AutoTokenizer,
BatchEncoding,
BertTokenizer,
BertTokenizerFast,
LlamaTokenizerFast,
PreTrainedTokenizer,
PreTrainedTokenizerFast,
TensorType,
TokenSpan,
is_tokenizers_available,
)
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import (
CaptureStderr,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
if is_tokenizers_available():
import tokenizers
from tokenizers import Tokenizer
from tokenizers.models import WordPiece
class TokenizerUtilsTest(unittest.TestCase):
def check_tokenizer_from_pretrained(self, tokenizer_class):
s3_models = list(tokenizer_class.max_model_input_sizes.keys())
for model_name in s3_models[:1]:
tokenizer = tokenizer_class.from_pretrained(model_name)
self.assertIsNotNone(tokenizer)
self.assertIsInstance(tokenizer, tokenizer_class)
self.assertIsInstance(tokenizer, PreTrainedTokenizer)
for special_tok in tokenizer.all_special_tokens:
self.assertIsInstance(special_tok, str)
special_tok_id = tokenizer.convert_tokens_to_ids(special_tok)
self.assertIsInstance(special_tok_id, int)
def assert_dump_and_restore(self, be_original: BatchEncoding, equal_op: Optional[Callable] = None):
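# Helper: pickle the BatchEncoding, restore it, and check that is_fast, encodings, and the stored values survive the round trip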
batch_encoding_str = pickle.dumps(be_original)
self.assertIsNotNone(batch_encoding_str)
be_restored = pickle.loads(batch_encoding_str)
# Ensure is_fast is correctly restored
self.assertEqual(be_restored.is_fast, be_original.is_fast)
# Ensure encodings are only restored for fast tokenizers (slow tokenizers have none)
if be_original.is_fast:
self.assertIsNotNone(be_restored.encodings)
else:
self.assertIsNone(be_restored.encodings)
# Ensure the stored values are identical after the round trip
for original_v, restored_v in zip(be_original.values(), be_restored.values()):
if equal_op:
self.assertTrue(equal_op(restored_v, original_v))
else:
self.assertEqual(restored_v, original_v)
@slow
def test_pretrained_tokenizers(self):
self.check_tokenizer_from_pretrained(GPT2Tokenizer)
def test_tensor_type_from_str(self):
self.assertEqual(TensorType("pt"), TensorType.PYTORCH)
self.assertEqual(TensorType("np"), TensorType.NUMPY)
@require_tokenizers
def test_batch_encoding_pickle(self):
tokenizer_p = BertTokenizer.from_pretrained("google-bert/bert-base-cased")
tokenizer_r = BertTokenizerFast.from_pretrained("google-bert/bert-base-cased")
# Python no tensor
with self.subTest("BatchEncoding (Python, return_tensors=None)"):
self.assert_dump_and_restore(tokenizer_p("Small example to encode"))
with self.subTest("BatchEncoding (Python, return_tensors=NUMPY)"):
self.assert_dump_and_restore(
tokenizer_p("Small example to encode", return_tensors=TensorType.NUMPY), np.array_equal
)
with self.subTest("BatchEncoding (Rust, return_tensors=None)"):
self.assert_dump_and_restore(tokenizer_r("Small example to encode"))
with self.subTest("BatchEncoding (Rust, return_tensors=NUMPY)"):
self.assert_dump_and_restore(
tokenizer_r("Small example to encode", return_tensors=TensorType.NUMPY), np.array_equal
)
@require_torch
@require_tokenizers
def test_batch_encoding_pickle_pt(self):
import torch
tokenizer_p = BertTokenizer.from_pretrained("google-bert/bert-base-cased")
tokenizer_r = BertTokenizerFast.from_pretrained("google-bert/bert-base-cased")
with self.subTest("BatchEncoding (Python, return_tensors=PYTORCH)"):
self.assert_dump_and_restore(
tokenizer_p("Small example to encode", return_tensors=TensorType.PYTORCH), torch.equal
)
with self.subTest("BatchEncoding (Rust, return_tensors=PYTORCH)"):
self.assert_dump_and_restore(
tokenizer_r("Small example to encode", return_tensors=TensorType.PYTORCH), torch.equal
)
@require_tokenizers
def test_batch_encoding_is_fast(self):
tokenizer_p = BertTokenizer.from_pretrained("google-bert/bert-base-cased")
tokenizer_r = BertTokenizerFast.from_pretrained("google-bert/bert-base-cased")
with self.subTest("Python Tokenizer"):
self.assertFalse(tokenizer_p("Small example to_encode").is_fast)
with self.subTest("Rust Tokenizer"):
self.assertTrue(tokenizer_r("Small example to_encode").is_fast)
@require_tokenizers
def test_batch_encoding_word_to_tokens(self):
tokenizer_r = BertTokenizerFast.from_pretrained("google-bert/bert-base-cased")
encoded = tokenizer_r(["Test", "\xad", "test"], is_split_into_words=True)
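# The soft hyphen ("\xad") produces no tokens, so the second word has no corresponding token span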
self.assertEqual(encoded.word_to_tokens(0), TokenSpan(start=1, end=2))
self.assertEqual(encoded.word_to_tokens(1), None)
self.assertEqual(encoded.word_to_tokens(2), TokenSpan(start=2, end=3))
def test_batch_encoding_with_labels(self):
batch = BatchEncoding({"inputs": [[1, 2, 3], [4, 5, 6]], "labels": [0, 1]})
tensor_batch = batch.convert_to_tensors(tensor_type="np")
self.assertEqual(tensor_batch["inputs"].shape, (2, 3))
self.assertEqual(tensor_batch["labels"].shape, (2,))
# test converting the converted
with CaptureStderr() as cs:
tensor_batch = batch.convert_to_tensors(tensor_type="np")
self.assertFalse(len(cs.err), msg=f"should have no warning, but got {cs.err}")
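# A single, un-batched example gets a leading batch dimension when prepend_batch_axis=True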
batch = BatchEncoding({"inputs": [1, 2, 3], "labels": 0})
tensor_batch = batch.convert_to_tensors(tensor_type="np", prepend_batch_axis=True)
self.assertEqual(tensor_batch["inputs"].shape, (1, 3))
self.assertEqual(tensor_batch["labels"].shape, (1,))
@require_torch
def test_batch_encoding_with_labels_pt(self):
batch = BatchEncoding({"inputs": [[1, 2, 3], [4, 5, 6]], "labels": [0, 1]})
tensor_batch = batch.convert_to_tensors(tensor_type="pt")
self.assertEqual(tensor_batch["inputs"].shape, (2, 3))
self.assertEqual(tensor_batch["labels"].shape, (2,))
# test converting the converted
with CaptureStderr() as cs:
tensor_batch = batch.convert_to_tensors(tensor_type="pt")
self.assertFalse(len(cs.err), msg=f"should have no warning, but got {cs.err}")
batch = BatchEncoding({"inputs": [1, 2, 3], "labels": 0})
tensor_batch = batch.convert_to_tensors(tensor_type="pt", prepend_batch_axis=True)
self.assertEqual(tensor_batch["inputs"].shape, (1, 3))
self.assertEqual(tensor_batch["labels"].shape, (1,))
def test_padding_accepts_tensors(self):
features = [{"input_ids": np.array([0, 1, 2])}, {"input_ids": np.array([0, 1, 2, 3])}]
tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-cased")
batch = tokenizer.pad(features, padding=True)
self.assertTrue(isinstance(batch["input_ids"], np.ndarray))
self.assertEqual(batch["input_ids"].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]])
batch = tokenizer.pad(features, padding=True, return_tensors="np")
self.assertTrue(isinstance(batch["input_ids"], np.ndarray))
self.assertEqual(batch["input_ids"].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]])
@require_tokenizers
def test_decoding_single_token(self):
for tokenizer_class in [BertTokenizer, BertTokenizerFast]:
with self.subTest(f"{tokenizer_class}"):
tokenizer = tokenizer_class.from_pretrained("google-bert/bert-base-cased")
token_id = 2300
decoded_flat = tokenizer.decode(token_id)
decoded_list = tokenizer.decode([token_id])
self.assertEqual(decoded_flat, "Force")
self.assertEqual(decoded_list, "Force")
token_id = 0
decoded_flat = tokenizer.decode(token_id)
decoded_list = tokenizer.decode([token_id])
self.assertEqual(decoded_flat, "[PAD]")
self.assertEqual(decoded_list, "[PAD]")
last_item_id = tokenizer.vocab_size - 1
decoded_flat = tokenizer.decode(last_item_id)
decoded_list = tokenizer.decode([last_item_id])
self.assertEqual(decoded_flat, "##")
self.assertEqual(decoded_list, "##")
def test_extra_special_tokens_multimodal(self):
special_tokens_list = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
"additional_special_tokens",
]
llama_tokenizer = LlamaTokenizerFast.from_pretrained("huggyllama/llama-7b")
llama_tokenizer.extra_special_tokens = {
"boi_token": "<image_start>",
"eoi_token": "<image_end>",
"image_token": "<image>",
}
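# Extra special tokens become accessible as attributes (e.g. llama_tokenizer.image_token) and are saved along with the tokenizer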
self.assertListEqual(llama_tokenizer.SPECIAL_TOKENS_ATTRIBUTES, special_tokens_list)
with tempfile.TemporaryDirectory() as tmpdirname:
llama_tokenizer.save_pretrained(tmpdirname)
# load back and check we have extra special tokens set
loaded_tokenizer = LlamaTokenizerFast.from_pretrained(tmpdirname)
multimodal_special_tokens_list = special_tokens_list + ["boi_token", "eoi_token", "image_token"]
self.assertListEqual(loaded_tokenizer.SPECIAL_TOKENS_ATTRIBUTES, multimodal_special_tokens_list)
# The extra special tokens were set above, so `image_token` is available as a string and `image_token_id` matches its converted id
self.assertTrue(loaded_tokenizer.image_token == "<image>")
self.assertTrue(loaded_tokenizer.image_token_id == loaded_tokenizer.convert_tokens_to_ids("<image>"))
# save one more time and make sure the image token can get loaded back
with tempfile.TemporaryDirectory() as tmpdirname:
loaded_tokenizer.save_pretrained(tmpdirname)
loaded_tokenizer_with_extra_tokens = LlamaTokenizerFast.from_pretrained(tmpdirname)
self.assertTrue(loaded_tokenizer_with_extra_tokens.image_token == "<image>")
# test that we can also indicate extra tokens during load time
extra_special_tokens = {
"boi_token": "<image_start>",
"eoi_token": "<image_end>",
"image_token": "<image>",
}
tokenizer = LlamaTokenizerFast.from_pretrained(
"huggyllama/llama-7b", extra_special_tokens=extra_special_tokens
)
self.assertTrue(tokenizer.image_token == "<image>")
self.assertTrue(tokenizer.image_token_id == loaded_tokenizer.convert_tokens_to_ids("<image>"))
@require_tokenizers
def test_decoding_skip_special_tokens(self):
for tokenizer_class in [BertTokenizer, BertTokenizerFast]:
with self.subTest(f"{tokenizer_class}"):
tokenizer = tokenizer_class.from_pretrained("google-bert/bert-base-cased")
tokenizer.add_tokens(["ஐ"], special_tokens=True)
# test special token with other tokens, skip the special tokens
sentence = "This is a beautiful flower ஐ"
ids = tokenizer(sentence)["input_ids"]
decoded_sent = tokenizer.decode(ids, skip_special_tokens=True)
self.assertEqual(decoded_sent, "This is a beautiful flower")
# test special token with other tokens, do not skip the special tokens
ids = tokenizer(sentence)["input_ids"]
decoded_sent = tokenizer.decode(ids, skip_special_tokens=False)
self.assertEqual(decoded_sent, "[CLS] This is a beautiful flower ஐ [SEP]")
# test special token stand alone, skip the special tokens
sentence = "ஐ"
ids = tokenizer(sentence)["input_ids"]
decoded_sent = tokenizer.decode(ids, skip_special_tokens=True)
self.assertEqual(decoded_sent, "")
# test special token stand alone, do not skip the special tokens
ids = tokenizer(sentence)["input_ids"]
decoded_sent = tokenizer.decode(ids, skip_special_tokens=False)
self.assertEqual(decoded_sent, "[CLS] ஐ [SEP]")
# test single special token alone, skip
pad_id = 0
decoded_sent = tokenizer.decode(pad_id, skip_special_tokens=True)
self.assertEqual(decoded_sent, "")
# test single special token alone, do not skip
decoded_sent = tokenizer.decode(pad_id, skip_special_tokens=False)
self.assertEqual(decoded_sent, "[PAD]")
@require_torch
def test_padding_accepts_tensors_pt(self):
import torch
features = [{"input_ids": torch.tensor([0, 1, 2])}, {"input_ids": torch.tensor([0, 1, 2, 3])}]
tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-cased")
batch = tokenizer.pad(features, padding=True)
self.assertTrue(isinstance(batch["input_ids"], torch.Tensor))
self.assertEqual(batch["input_ids"].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]])
batch = tokenizer.pad(features, padding=True, return_tensors="pt")
self.assertTrue(isinstance(batch["input_ids"], torch.Tensor))
self.assertEqual(batch["input_ids"].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]])
@require_tokenizers
def test_instantiation_from_tokenizers(self):
bert_tokenizer = Tokenizer(WordPiece(unk_token="[UNK]"))
PreTrainedTokenizerFast(tokenizer_object=bert_tokenizer)
@require_tokenizers
def test_instantiation_from_tokenizers_json_file(self):
bert_tokenizer = Tokenizer(WordPiece(unk_token="[UNK]"))
with tempfile.TemporaryDirectory() as tmpdirname:
bert_tokenizer.save(os.path.join(tmpdirname, "tokenizer.json"))
PreTrainedTokenizerFast(tokenizer_file=os.path.join(tmpdirname, "tokenizer.json"))
def test_len_tokenizer(self):
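# len(tokenizer) should count the base vocab plus any added tokens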
for tokenizer_class in [BertTokenizer, BertTokenizerFast]:
with self.subTest(f"{tokenizer_class}"):
tokenizer = tokenizer_class.from_pretrained("bert-base-uncased")
added_tokens_size = len(tokenizer.added_tokens_decoder)
self.assertEqual(len(tokenizer), tokenizer.vocab_size)
tokenizer.add_tokens(["<test_token>"])
self.assertEqual(len(tokenizer), tokenizer.vocab_size + 1)
self.assertEqual(len(tokenizer.added_tokens_decoder), added_tokens_size + 1)
self.assertEqual(len(tokenizer.added_tokens_encoder), added_tokens_size + 1)
@require_sentencepiece
def test_sentencepiece_cohabitation(self):
from sentencepiece import sentencepiece_model_pb2 as _original_protobuf # noqa: F401
from transformers.convert_slow_tokenizer import import_protobuf # noqa: F401
# Now this will try to import sentencepiece_model_pb2_new.py. This should not fail even if the protobuf
# was already imported.
import_protobuf()
def test_training_new_tokenizer_edge_cases(self):
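# train_new_from_iterator should still work when the pre_tokenizer, normalizer, or post_processor is None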
_tokenizer = Tokenizer(tokenizers.models.BPE(vocab={"a": 1, "b": 2, "ab": 3}, merges=[("a", "b")]))
_tokenizer.pre_tokenizer = None
tokenizer = PreTrainedTokenizerFast(tokenizer_object=_tokenizer)
toy_text_iterator = ("a" for _ in range(1000))
tokenizer.train_new_from_iterator(text_iterator=toy_text_iterator, length=1000, vocab_size=50)
_tokenizer.normalizer = None
tokenizer = PreTrainedTokenizerFast(tokenizer_object=_tokenizer)
toy_text_iterator = ("a" for _ in range(1000))
tokenizer.train_new_from_iterator(text_iterator=toy_text_iterator, length=1000, vocab_size=50)
_tokenizer.post_processor = None
tokenizer = PreTrainedTokenizerFast(tokenizer_object=_tokenizer)
toy_text_iterator = ("a" for _ in range(1000))
tokenizer.train_new_from_iterator(text_iterator=toy_text_iterator, length=1000, vocab_size=50)
def test_encode_message(self):
tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
conversation = [
{"role": "system", "content": "You are a helpful assistant"},
{"role": "user", "content": "Hey there, how are you?"},
{"role": "assistant", "content": "Thank you for asking, I am doing well"},
{"role": "user", "content": "What's the weather like today?"},
{"role": "assistant", "content": "Today the weather is nice"},
]
# First, test the default case, where we encode the whole conversation at once
whole_conversation_tokens = tokenizer.apply_chat_template(conversation, tokenize=True)
# Now, test the message-by-message encoding
tokens = []
for i, message in enumerate(conversation):
tokens += tokenizer.encode_message_with_chat_template(message, conversation_history=conversation[:i])
self.assertEqual(whole_conversation_tokens, tokens)
def test_encode_message_raises_on_add_generation_prompt(self):
tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
conversation = [
{"role": "system", "content": "You are a helpful assistant"},
{"role": "user", "content": "Hey there, how are you?"},
]
with self.assertRaises(ValueError):
tokenizer.encode_message_with_chat_template(conversation[0], add_generation_prompt=True)