init
This commit is contained in:
0
transformers/tests/models/llama/__init__.py
Normal file
0
transformers/tests/models/llama/__init__.py
Normal file
606
transformers/tests/models/llama/test_modeling_llama.py
Normal file
606
transformers/tests/models/llama/test_modeling_llama.py
Normal file
@@ -0,0 +1,606 @@
|
||||
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""Testing suite for the PyTorch LLaMA model."""
|
||||
|
||||
import unittest
|
||||
|
||||
import pytest
|
||||
from packaging import version
|
||||
|
||||
from transformers import AutoTokenizer, StaticCache, is_torch_available
|
||||
from transformers.generation.configuration_utils import GenerationConfig
|
||||
from transformers.testing_utils import (
|
||||
Expectations,
|
||||
cleanup,
|
||||
require_read_token,
|
||||
require_torch,
|
||||
require_torch_accelerator,
|
||||
run_test_using_subprocess,
|
||||
slow,
|
||||
torch_device,
|
||||
)
|
||||
|
||||
from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester
|
||||
|
||||
|
||||
if is_torch_available():
|
||||
import torch
|
||||
|
||||
from transformers import (
|
||||
LlamaConfig,
|
||||
LlamaForCausalLM,
|
||||
LlamaForQuestionAnswering,
|
||||
LlamaForSequenceClassification,
|
||||
LlamaForTokenClassification,
|
||||
LlamaModel,
|
||||
LlamaTokenizer,
|
||||
)
|
||||
|
||||
|
||||
class LlamaModelTester(CausalLMModelTester):
    """Maps the shared causal-LM tester machinery onto the Llama model family."""

    # Guarded so this module can still be imported (and skipped) without torch.
    if is_torch_available():
        config_class = LlamaConfig
        base_model_class = LlamaModel
        causal_lm_class = LlamaForCausalLM
        sequence_class = LlamaForSequenceClassification
        token_class = LlamaForTokenClassification
|
||||
|
||||
@require_torch
class LlamaModelTest(CausalLMModelTest, unittest.TestCase):
    """Fast (non-slow) model tests for Llama, driven by the common CausalLMModelTest suite."""

    # Empty tuple/dict fallbacks keep class creation importable without torch.
    all_model_classes = (
        (
            LlamaModel,
            LlamaForCausalLM,
            LlamaForSequenceClassification,
            LlamaForQuestionAnswering,
            LlamaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
            "question-answering": LlamaForQuestionAnswering,
            "token-classification": LlamaForTokenClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    fx_compatible = False  # Broken by attention refactor cc @Cyrilvallez
    model_tester_class = LlamaModelTester

    # Need to use `0.8` instead of `0.9` for `test_cpu_offload`
    # This is because we are hitting edge cases with the causal_mask buffer
    model_split_percents = [0.5, 0.7, 0.8]

    # used in `test_torch_compile_for_training`
    _torch_compile_train_cls = LlamaForCausalLM if is_torch_available() else None
||||
|
||||
|
||||
@require_torch_accelerator
@require_read_token
class LlamaIntegrationTest(unittest.TestCase):
    """Slow integration tests that run real (gated) Llama checkpoints on an accelerator."""

    def setUp(self):
        # BUGFIX: this hook was named `setup`, which `unittest` never calls
        # (the framework hook is `setUp`), so the pre-test accelerator
        # cleanup silently never ran. Renamed so it is actually invoked.
        cleanup(torch_device, gc_collect=True)

    def tearDown(self):
        # TODO (joao): automatic compilation, i.e. compilation when `cache_implementation="static"` is used, leaves
        # some memory allocated in the cache, which means some object is not being released properly. This causes some
        # unoptimal memory usage, e.g. after certain tests a 7B model in FP16 no longer fits in a 24GB GPU.
        # Investigate the root cause.
        cleanup(torch_device, gc_collect=True)
||||
|
||||
@slow
def test_llama_3_1_hard(self):
    """
    An integration test for llama 3.1. It tests against a long output to ensure the subtle numerical differences
    from llama 3.1.'s RoPE can be detected
    """
    # Golden outputs are keyed by (device type, compute capability); `None`
    # matches any capability for that device type.
    expected_texts = Expectations(
        {
            ("rocm", (9, 5)): 'Tell me about the french revolution. The french revolution was a period of radical social and political upheaval in France that lasted from 1789 until 1799. It was a time of great change and upheaval, marked by the overthrow of the monarchy, the rise of the middle class, and the eventual establishment of the First French Republic.\nThe revolution began in 1789 with the Estates-General, a representative assembly that had not met since 1614. The Third Estate, which represented the common people, demanded greater representation and eventually broke away to form the National Assembly. This marked the beginning of the end of the absolute monarchy and the rise of the middle class.\n',
            ("cuda", None): 'Tell me about the french revolution. The french revolution was a period of radical political and social upheaval in France that lasted from 1789 until 1799. It was a time of great change and upheaval, marked by the overthrow of the monarchy, the rise of the middle class, and the eventual establishment of the First French Republic.\nThe revolution began in 1789 with the Estates-General, a representative assembly that had not met since 1614. The Third Estate, which represented the common people, demanded greater representation and eventually broke away to form the National Assembly. The National Assembly adopted the Declaration of the Rights of Man and of the Citizen, which enshr',
        }
    )  # fmt: skip
    EXPECTED_TEXT = expected_texts.get_expectation()

    tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3.1-8B-Instruct")
    model = LlamaForCausalLM.from_pretrained(
        "meta-llama/Meta-Llama-3.1-8B-Instruct", device_map="auto", dtype=torch.bfloat16
    )
    input_text = ["Tell me about the french revolution."]
    model_inputs = tokenizer(input_text, return_tensors="pt").to(model.device)

    # Greedy decoding so the output is deterministic and comparable to the golden text.
    generated_ids = model.generate(**model_inputs, max_new_tokens=128, do_sample=False)
    generated_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
    self.assertEqual(generated_text, EXPECTED_TEXT)
||||
|
||||
@slow
def test_model_7b_logits_bf16(self):
    """Check Llama-2-7B bf16 logits (per-position mean and a slice of the first position) against per-hardware golden values."""
    input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]

    # `eager` attention keeps numerics comparable across hardware/backends.
    model = LlamaForCausalLM.from_pretrained(
        "meta-llama/Llama-2-7b-hf", device_map="auto", dtype=torch.bfloat16, attn_implementation="eager"
    )

    with torch.no_grad():
        out = model(torch.tensor([input_ids]).to(torch_device))
    # Expected mean on dim = -1

    # fmt: off
    expected_means = Expectations(
        {
            ("xpu", 3): torch.tensor([[-6.5208, -4.1218, -4.9377, -3.2536, 0.8127, -2.9811, 1.2918, -3.3848]]),
            ("cuda", 7): torch.tensor([[-6.5061, -4.1147, -4.9669, -3.2038, 0.8069, -2.9694, 1.2864, -3.3786]]),
            ("cuda", 8): torch.tensor([[-6.5208, -4.1218, -4.9377, -3.2536, 0.8127, -2.9811, 1.2918, -3.3848]]),
            ("rocm", (9, 4)): torch.tensor([[-6.5094, -4.1329, -4.9754, -3.5042, 0.8082, -2.9443, 1.2830, -3.3539]]),
        })

    expected_mean = expected_means.get_expectation().to(torch_device)
    actual_mean = out.logits.float().mean(-1)
    # Loose tolerances (1e-2) because bf16 accumulation differs across devices.
    self.assertTrue(
        torch.allclose(
            expected_mean,
            actual_mean,
            atol=1e-2,
            rtol=1e-2
        )
    )

    # slicing logits[0, 0, 0:15]
    expected_slices = Expectations(
        {
            ("xpu", 3): torch.tensor([[-12.5625, -7.1250, -0.6289, -7.8750, -6.9688, -7.8125, -6.5000, -7.4375, -7.6562, -6.9688, -6.0312, -7.0312, -1.8203, 1.8750, -8.5000]]),
            ("cuda", 7): torch.tensor([[-12.5000, -7.0625, -0.6289, -7.8750, -6.9688, -7.8125, -6.4688, -7.4375, -7.6875, -6.9375, -6.0312, -7.0000, -1.8594, 1.8438, -8.5000]]),
            ("cuda", 8): torch.tensor([[-12.5625, -7.1250, -0.6289, -7.8750, -6.9688, -7.8125, -6.5000, -7.4375, -7.6562, -6.9688, -6.0312, -7.0312, -1.8203, 1.8750, -8.5000]]),
            ("rocm", (9, 4)): torch.tensor([[-12.5000, -7.0625, -0.6289, -7.8750, -6.9688, -7.8125, -6.5000, -7.4375, -7.6562, -6.9375, -6.0312, -7.0312, -1.8594, 1.8438, -8.5000]])
        })
    # fmt: on
    expected_slice = expected_slices.get_expectation().to(torch_device)
    actual_slice = out.logits[0, 0, :15].float()
    self.assertTrue(torch.allclose(expected_slice, actual_slice, atol=1e-2, rtol=1e-2))
||||
|
||||
@slow
def test_model_7b_logits(self):
    """Same as the bf16 variant above, but in fp16 with the default attention implementation."""
    input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]

    model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto", dtype=torch.float16)

    with torch.no_grad():
        out = model(torch.tensor([input_ids]).to(torch_device))

    # fmt: off
    # Expected mean on dim = -1
    expected_means = Expectations(
        {
            ("xpu", 3): torch.tensor([[-6.6544, -4.1259, -4.9840, -3.2456, 0.8261, -3.0124, 1.2971, -3.3641]]),
            ("cuda", 7): torch.tensor([[-6.6420, -4.1227, -4.9809, -3.2041, 0.8261, -3.0052, 1.2957, -3.3648]]),
            ("cuda", 8): torch.tensor([[-6.6544, -4.1259, -4.9840, -3.2456, 0.8261, -3.0124, 1.2971, -3.3641]]),
        })

    expected_mean = expected_means.get_expectation()
    self.assertTrue(
        torch.allclose(
            expected_mean.to(torch_device),
            out.logits.float().mean(-1),
            atol=1e-2,
            rtol=1e-2
        )
    )

    # slicing logits[0, 0, 0:15]
    expected_slices = Expectations(
        {
            ("xpu", 3): torch.tensor([-12.8281, -7.4609, -0.4668, -8.0703, -7.2539, -8.0078, -6.4961, -7.7734, -7.8516, -7.0352, -6.2188, -7.1367, -1.8564, 1.9922, -8.6328]),
            ("cuda", 7): torch.tensor([-12.8125, -7.3359, -0.4846, -8.0234, -7.2383, -7.9922, -6.4805, -7.7344, -7.8125, -7.0078, -6.1797, -7.1094, -1.8633, 1.9736, -8.6016]),
            ("cuda", 8): torch.tensor([-12.8281, -7.4609, -0.4668, -8.0703, -7.2539, -8.0078, -6.4961, -7.7734, -7.8516, -7.0352, -6.2188, -7.1367, -1.8564, 1.9922, -8.6328])
        })
    # fmt: on

    expected_slice = expected_slices.get_expectation()
    self.assertTrue(
        torch.allclose(
            expected_slice.to(torch_device),
            out.logits[0, 0, :15].float(),
            atol=1e-2,
            rtol=1e-2,
        )
    )
|
||||
|
||||
# TODO joao, manuel: remove this in v4.62.0
# TODO: check why we have the following strange situation.
# without running in subprocess, this test causes subsequent tests failing with `RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cpu and cuda:0!`
@run_test_using_subprocess
@slow
def test_model_7b_dola_generation(self):
    """Greedy generation with the community DoLa decoding strategy, compared against a pinned completion."""
    # ground truth text generated with dola_layers="low", repetition_penalty=1.2
    EXPECTED_TEXT_COMPLETION = (
        "Simply put, the theory of relativity states that 1) time and space are relative, and 2) the laws of "
        "physics are the same for all observers in uniform motion relative to one another.\n\nThe theory of "
        "relativity was developed by Albert Einstein in the early 20th century, and it revolutionized our "
        "understanding of space and time."
    )
    prompt = "Simply put, the theory of relativity states that "
    tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
    model = LlamaForCausalLM.from_pretrained(
        "meta-llama/Llama-2-7b-chat-hf", device_map="sequential", dtype=torch.float16
    )
    model_inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

    # greedy generation outputs
    # `custom_generate` pulls the DoLa implementation from the Hub, hence `trust_remote_code`.
    generated_ids = model.generate(
        **model_inputs,
        max_new_tokens=64,
        top_p=None,
        temperature=1,
        do_sample=False,
        dola_layers="low",
        trust_remote_code=True,
        custom_generate="transformers-community/dola",
    )
    text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
    self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
|
||||
|
||||
@slow
@require_torch_accelerator
@pytest.mark.torch_compile_test
def test_compile_static_cache(self):
    """Verify that generation with a compiled static cache matches dynamic-cache generation."""
    # `torch==2.2` will throw an error on this test (as in other compilation tests), but torch==2.1.2 and torch>2.2
    # work as intended. See https://github.com/pytorch/pytorch/issues/121943
    if version.parse(torch.__version__) < version.parse("2.3.0"):
        self.skipTest(reason="This test requires torch >= 2.3 to run.")

    NUM_TOKENS_TO_GENERATE = 40
    # Note on `EXPECTED_TEXT_COMPLETION`'s diff: the current value matches the original test if the original test
    # was changed to have a cache of 53 tokens (as opposed to 4096), on Ampere GPUs.
    EXPECTED_TEXT_COMPLETION = [
        "Simply put, the theory of relativity states that 1) the speed of light is constant in all inertial "
        "reference frames, and 2) the laws of physics are the same for all inertial reference frames.\nThe "
        "theory of relativ",
        "My favorite all time favorite condiment is ketchup. I love it on everything. I love it on my eggs, "
        "my fries, my chicken, my burgers, my hot dogs, my sandwiches, my salads, my p",
    ]

    prompts = [
        "Simply put, the theory of relativity states that ",
        "My favorite all time favorite condiment is ketchup.",
    ]
    tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf", pad_token="</s>", padding_side="right")
    model = LlamaForCausalLM.from_pretrained(
        "meta-llama/Llama-2-7b-hf", device_map=torch_device, dtype=torch.float16
    )
    inputs = tokenizer(prompts, return_tensors="pt", padding=True).to(model.device)

    # Dynamic Cache
    generated_ids = model.generate(**inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False)
    dynamic_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
    self.assertEqual(EXPECTED_TEXT_COMPLETION, dynamic_text)

    # Static Cache + compile (`generate()` internally compiles each decoding step when static cache is used)
    generated_ids = model.generate(
        **inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False, cache_implementation="static"
    )
    static_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
    self.assertEqual(EXPECTED_TEXT_COMPLETION, static_text)
|
||||
|
||||
@slow
@pytest.mark.torch_export_test
def test_export_static_cache(self):
    """Export a Llama model with a static cache via torch.export and check generation from the exported program."""
    if version.parse(torch.__version__) < version.parse("2.4.0"):
        self.skipTest(reason="This test requires torch >= 2.4 to run.")

    # Local import: the executorch integration is optional at module import time.
    from transformers.integrations.executorch import (
        TorchExportableModuleWithStaticCache,
    )

    llama_models = {
        "meta-llama/Llama-3.2-1B": [
            "Simply put, the theory of relativity states that 1) the speed of light is the same for all "
            "observers, regardless of their location, and 2) the laws of physics are the same for all observers"
        ],
    }

    for llama_model_ckp, EXPECTED_TEXT_COMPLETION in llama_models.items():
        # Load tokenizer
        tokenizer = AutoTokenizer.from_pretrained(llama_model_ckp, pad_token="</s>", padding_side="right")
        # The static cache is sized so the expected completion fits exactly.
        max_generation_length = tokenizer(EXPECTED_TEXT_COMPLETION, return_tensors="pt", padding=True)[
            "input_ids"
        ].shape[-1]

        # Load model
        device = "cpu"  # TODO (joao / export experts): should be on `torch_device`, but causes GPU OOM
        dtype = torch.bfloat16
        cache_implementation = "static"
        attn_implementation = "sdpa"
        batch_size = 1
        model = LlamaForCausalLM.from_pretrained(
            llama_model_ckp,
            device_map=device,
            dtype=dtype,
            attn_implementation=attn_implementation,
            generation_config=GenerationConfig(
                use_cache=True,
                cache_implementation=cache_implementation,
                max_length=max_generation_length,
                cache_config={
                    "batch_size": batch_size,
                    "max_cache_len": max_generation_length,
                    "device": device,
                },
            ),
        )

        prompts = ["Simply put, the theory of relativity states that "]
        prompt_tokens = tokenizer(prompts, return_tensors="pt", padding=True).to(model.device)
        prompt_token_ids = prompt_tokens["input_ids"]
        max_new_tokens = max_generation_length - prompt_token_ids.shape[-1]

        # Static Cache + export
        from transformers.integrations.executorch import TorchExportableModuleForDecoderOnlyLM

        exportable_module = TorchExportableModuleForDecoderOnlyLM(model)
        # Export is traced with a single dummy token at cache position 0.
        exported_program = exportable_module.export(
            input_ids=torch.tensor([[1]], dtype=torch.long, device=model.device),
            cache_position=torch.tensor([0], dtype=torch.long, device=model.device),
        )
        ep_generated_ids = TorchExportableModuleWithStaticCache.generate(
            exported_program=exported_program, prompt_token_ids=prompt_token_ids, max_new_tokens=max_new_tokens
        )
        ep_generated_text = tokenizer.batch_decode(ep_generated_ids, skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, ep_generated_text)
|
||||
|
||||
|
||||
@slow
@require_torch_accelerator
class Mask4DTestHard(unittest.TestCase):
    """Tests that custom 4D attention masks (shared-prefix packing) reproduce per-line batched results."""

    def tearDown(self):
        cleanup(torch_device, gc_collect=True)

    def setUp(self):
        cleanup(torch_device, gc_collect=True)
        model_name = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
        # fp32 keeps the argmax comparisons below free of precision noise.
        self.model_dtype = torch.float32
        self.tokenizer = LlamaTokenizer.from_pretrained(model_name)
        self.model = LlamaForCausalLM.from_pretrained(model_name, dtype=self.model_dtype).to(torch_device)
|
||||
|
||||
def get_test_data(self):
    """Build paired inputs: 3 separate prompts vs. one packed prompt with a shared prefix plus a 4D mask.

    Returns (input_ids, position_ids, input_ids_shared_prefix, mask_shared_prefix,
    position_ids_shared_prefix); the mask is already inverted to additive form
    (0 where attended, dtype-min where blocked).
    """
    template = "my favorite {}"
    items = ("pet is a", "artist plays a", "name is L")  # same number of tokens in each item

    batch_separate = [template.format(x) for x in items]  # 3 separate lines
    batch_shared_prefix = template.format(" ".join(items))  # 1 line with options concatenated

    input_ids = self.tokenizer(batch_separate, return_tensors="pt").input_ids.to(torch_device)
    input_ids_shared_prefix = self.tokenizer(batch_shared_prefix, return_tensors="pt").input_ids.to(torch_device)

    # Rows 0-5: causal over the shared prefix; rows 6-8, 9-11: each continuation
    # attends to the first 3 prefix tokens plus its own tokens only.
    mask_shared_prefix = torch.tensor(
        [
            [
                [
                    [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0],
                    [1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0],
                    [1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0],
                    [1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0],
                    [1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0],
                    [1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1],
                ]
            ]
        ],
        device=torch_device,
    )

    position_ids = torch.arange(input_ids.shape[1]).tile(input_ids.shape[0], 1).to(torch_device)

    # building custom positions ids based on custom mask
    position_ids_shared_prefix = (mask_shared_prefix.sum(dim=-1) - 1).reshape(1, -1)
    # effectively: position_ids_shared_prefix = torch.tensor([[0, 1, 2, 3, 4, 5, 3, 4, 5, 3, 4, 5]]).to(device)

    # inverting the mask
    min_dtype = torch.finfo(self.model_dtype).min
    mask_shared_prefix = (mask_shared_prefix.eq(0.0)).to(dtype=self.model_dtype) * min_dtype

    return input_ids, position_ids, input_ids_shared_prefix, mask_shared_prefix, position_ids_shared_prefix
|
||||
|
||||
def test_stacked_causal_mask(self):
    """A single packed forward pass with a 4D mask must predict the same next tokens as a regular batch."""
    (
        input_ids,
        position_ids,
        input_ids_shared_prefix,
        mask_shared_prefix,
        position_ids_shared_prefix,
    ) = self.get_test_data()

    # Reference: ordinary batched forward, greedy next-token per batch line.
    batch_logits = self.model.forward(input_ids, position_ids=position_ids).logits
    reference_decoded = [self.tokenizer.decode(tok) for tok in batch_logits[:, -1, :].argmax(dim=-1)]

    # Candidate: one packed sequence with the custom 4D attention mask.
    packed_out = self.model.forward(
        input_ids_shared_prefix, attention_mask=mask_shared_prefix, position_ids=position_ids_shared_prefix
    )
    # Positions holding the maximal position id are the final token of each packed segment.
    last_token_positions = torch.where(position_ids_shared_prefix == position_ids_shared_prefix.max())[1]
    packed_last_logits = packed_out.logits[0, last_token_positions, :]
    packed_decoded = [self.tokenizer.decode(tok) for tok in packed_last_logits.argmax(dim=-1)]

    self.assertEqual(reference_decoded, packed_decoded)
|
||||
|
||||
def test_partial_stacked_causal_mask(self):
    # Same as the test above, but the input is passed in two groups. It tests that we can pass partial 4D attention masks

    (
        input_ids,
        position_ids,
        input_ids_shared_prefix,
        mask_shared_prefix,
        position_ids_shared_prefix,
    ) = self.get_test_data()

    # regular batch
    logits = self.model.forward(input_ids, position_ids=position_ids).logits
    logits_last = logits[:, -1, :]  # last tokens in each batch line
    decoded = [self.tokenizer.decode(t) for t in logits_last.argmax(dim=-1)]

    # 2 forward runs with custom 4D masks
    part_a = 3  # split point

    # First chunk: prefix tokens only, with the matching top-left mask corner.
    input_1a = input_ids_shared_prefix[:, :part_a]
    position_ids_1a = position_ids_shared_prefix[:, :part_a]
    mask_1a = mask_shared_prefix[:, :, :part_a, :part_a]

    outs_1a = self.model.forward(input_1a, attention_mask=mask_1a, position_ids=position_ids_1a)
    past_key_values_a = outs_1a["past_key_values"]

    # Case 1: we pass a 4D attention mask regarding the current sequence length (i.e. [..., seq_len, full_len])
    input_1b = input_ids_shared_prefix[:, part_a:]
    position_ids_1b = position_ids_shared_prefix[:, part_a:]
    mask_1b = mask_shared_prefix[:, :, part_a:, :]
    outs_1b = self.model.forward(
        input_1b,
        attention_mask=mask_1b,
        position_ids=position_ids_1b,
        past_key_values=past_key_values_a,
    )
    # Segment-final positions, shifted by `part_a` since the second chunk's logits start there.
    decoded_1b = [
        self.tokenizer.decode(t)
        for t in outs_1b.logits.argmax(-1)[
            0, torch.where(position_ids_shared_prefix == position_ids_shared_prefix.max())[1] - part_a
        ]
    ]
    self.assertEqual(decoded, decoded_1b)
|
||||
|
||||
def test_stacked_causal_mask_static_cache(self):
    """same as above but with StaticCache"""
    (
        input_ids,
        position_ids,
        input_ids_shared_prefix,
        mask_shared_prefix,
        position_ids_shared_prefix,
    ) = self.get_test_data()

    # regular batch
    logits = self.model.forward(input_ids, position_ids=position_ids).logits
    logits_last = logits[:, -1, :]  # last tokens in each batch line
    decoded = [self.tokenizer.decode(t) for t in logits_last.argmax(dim=-1)]

    # upgrade the model with StaticCache
    max_cache_len = 16  # note that max_cache_len is greater than the attention_mask.shape[-1]
    past_key_values = StaticCache(config=self.model.config, max_cache_len=max_cache_len)

    # Pad the 4D mask up to the static cache length; the padding is dtype-min
    # (i.e. "blocked") so the unused cache slots are never attended.
    padded_attention_mask = torch.nn.functional.pad(
        input=mask_shared_prefix,
        pad=(0, max_cache_len - mask_shared_prefix.shape[-1]),
        mode="constant",
        value=torch.finfo(self.model_dtype).min,
    )

    # single forward run with 4D custom mask
    logits_shared_prefix = self.model.forward(
        input_ids_shared_prefix,
        attention_mask=padded_attention_mask,
        position_ids=position_ids_shared_prefix,
        cache_position=torch.arange(input_ids_shared_prefix.shape[-1], device=torch_device),
        past_key_values=past_key_values,
    ).logits
    logits_shared_prefix_last = logits_shared_prefix[
        0, torch.where(position_ids_shared_prefix == position_ids_shared_prefix.max())[1], :
    ]  # last three tokens
    decoded_shared_prefix = [self.tokenizer.decode(t) for t in logits_shared_prefix_last.argmax(dim=-1)]

    self.assertEqual(decoded, decoded_shared_prefix)
|
||||
|
||||
def test_partial_stacked_causal_mask_static_cache(self):
    # Same as the test above, but the input is passed in two groups. It tests that we can pass partial 4D attention masks
    # we pass a 4D attention mask shaped [..., seq_len, full_static_cache_len])
    (
        input_ids,
        position_ids,
        input_ids_shared_prefix,
        mask_shared_prefix,
        position_ids_shared_prefix,
    ) = self.get_test_data()

    # regular batch
    logits = self.model.forward(input_ids, position_ids=position_ids).logits
    logits_last = logits[:, -1, :]  # last tokens in each batch line
    decoded = [self.tokenizer.decode(t) for t in logits_last.argmax(dim=-1)]

    # upgrade the model with StaticCache
    max_cache_len = 16  # note that max_cache_len is greater than the attention_mask.shape[-1]
    past_key_values = StaticCache(config=self.model.config, max_cache_len=max_cache_len)

    # forward run for the first part of input
    part_a = 3  # split point

    input_1a = input_ids_shared_prefix[:, :part_a]
    position_ids_1a = position_ids_shared_prefix[:, :part_a]
    mask_1a = mask_shared_prefix[:, :, :part_a, :part_a]

    # First chunk's mask is padded with dtype-min: nothing beyond the prefix exists yet.
    padded_mask_1a = torch.nn.functional.pad(
        input=mask_1a,
        pad=(0, max_cache_len - mask_1a.shape[-1]),
        mode="constant",
        value=torch.finfo(self.model_dtype).min,
    )

    _ = self.model.forward(
        input_1a,
        attention_mask=padded_mask_1a,
        position_ids=position_ids_1a,
        cache_position=torch.arange(part_a, device=torch_device),
        past_key_values=past_key_values,
    )

    # forward run for the second part of input
    input_1b = input_ids_shared_prefix[:, part_a:]
    position_ids_1b = position_ids_shared_prefix[:, part_a:]
    mask_1b = mask_shared_prefix[:, :, part_a:, :]

    # NOTE(review): the second chunk pads with 0 ("attend") rather than dtype-min,
    # unlike the first chunk — presumably intentional for cache slots being filled;
    # confirm against the 4D-mask contract.
    padded_mask_1b = torch.nn.functional.pad(
        input=mask_1b, pad=(0, max_cache_len - mask_1b.shape[-1]), mode="constant", value=0
    )

    outs_1b = self.model.forward(
        input_1b,
        attention_mask=padded_mask_1b,
        position_ids=position_ids_1b,
        cache_position=torch.arange(
            part_a,
            input_ids_shared_prefix.shape[-1],
            device=torch_device,
        ),
        past_key_values=past_key_values,
    )
    decoded_1b = [
        self.tokenizer.decode(t)
        for t in outs_1b.logits.argmax(-1)[
            0, torch.where(position_ids_shared_prefix == position_ids_shared_prefix.max())[1] - part_a
        ]
    ]
    self.assertEqual(decoded, decoded_1b)
|
||||
915
transformers/tests/models/llama/test_tokenization_llama.py
Normal file
915
transformers/tests/models/llama/test_tokenization_llama.py
Normal file
@@ -0,0 +1,915 @@
|
||||
# Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
import pickle
|
||||
import shutil
|
||||
import tempfile
|
||||
import unittest
|
||||
|
||||
from datasets import load_dataset
|
||||
from huggingface_hub import hf_hub_download
|
||||
|
||||
from transformers import (
|
||||
SPIECE_UNDERLINE,
|
||||
AddedToken,
|
||||
AutoTokenizer,
|
||||
LlamaTokenizer,
|
||||
LlamaTokenizerFast,
|
||||
PreTrainedTokenizerFast,
|
||||
)
|
||||
from transformers.convert_slow_tokenizer import convert_slow_tokenizer
|
||||
from transformers.testing_utils import (
|
||||
get_tests_dir,
|
||||
nested_simplify,
|
||||
require_jinja,
|
||||
require_read_token,
|
||||
require_sentencepiece,
|
||||
require_tiktoken,
|
||||
require_tokenizers,
|
||||
require_torch,
|
||||
slow,
|
||||
)
|
||||
|
||||
from ...test_tokenization_common import TokenizerTesterMixin
|
||||
|
||||
|
||||
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
|
||||
|
||||
|
||||
@require_sentencepiece
@require_tokenizers
class LlamaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for Llama, driven by the shared TokenizerTesterMixin suite."""

    from_pretrained_id = ["hf-internal-testing/llama-tokenizer", "meta-llama/Llama-2-7b-hf"]
    tokenizer_class = LlamaTokenizer
    rust_tokenizer_class = LlamaTokenizerFast

    test_rust_tokenizer = False
    test_sentencepiece = True
    from_pretrained_kwargs = {}

    @classmethod
    def setUpClass(cls):
        super().setUpClass()

        # We have a SentencePiece fixture for testing
        tokenizer = LlamaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.pad_token = tokenizer.eos_token
        tokenizer.save_pretrained(cls.tmpdirname)

    def get_tokenizers(self, **kwargs):
        # Force a pad token so mixin tests relying on padding work.
        kwargs.update({"pad_token": "<PAD>"})
        return super().get_tokenizers(**kwargs)
|
||||
|
||||
def test_full_tokenizer(self):
    """Round-trip the SentencePiece fixture: tokenize, convert to ids, and back (unknown pieces map to <unk>)."""
    tokenizer = LlamaTokenizer(SAMPLE_VOCAB, keep_accents=True)

    tokens = tokenizer.tokenize("This is a test")
    self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

    self.assertListEqual(
        tokenizer.convert_tokens_to_ids(tokens),
        [285, 46, 10, 170, 382],
    )

    tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
    self.assertListEqual(
        tokens,
        [
            SPIECE_UNDERLINE + "I",
            SPIECE_UNDERLINE + "was",
            SPIECE_UNDERLINE + "b",
            "or",
            "n",
            SPIECE_UNDERLINE + "in",
            SPIECE_UNDERLINE + "",
            "9",
            "2",
            "0",
            "0",
            "0",
            ",",
            SPIECE_UNDERLINE + "and",
            SPIECE_UNDERLINE + "this",
            SPIECE_UNDERLINE + "is",
            SPIECE_UNDERLINE + "f",
            "al",
            "s",
            "é",
            ".",
        ],
    )
    ids = tokenizer.convert_tokens_to_ids(tokens)
    # id 0 is the unknown token; "9" and "é" are out of the fixture vocab.
    self.assertListEqual(
        ids,
        [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
    )

    back_tokens = tokenizer.convert_ids_to_tokens(ids)
    self.assertListEqual(
        back_tokens,
        [
            SPIECE_UNDERLINE + "I",
            SPIECE_UNDERLINE + "was",
            SPIECE_UNDERLINE + "b",
            "or",
            "n",
            SPIECE_UNDERLINE + "in",
            SPIECE_UNDERLINE + "",
            "<unk>",
            "2",
            "0",
            "0",
            "0",
            ",",
            SPIECE_UNDERLINE + "and",
            SPIECE_UNDERLINE + "this",
            SPIECE_UNDERLINE + "is",
            SPIECE_UNDERLINE + "f",
            "al",
            "s",
            "<unk>",
            ".",
        ],
    )
|
||||
|
||||
@unittest.skip(reason="Let's wait for the fast tokenizer!")
def test_save_pretrained(self):
    """Save slow and fast tokenizers side by side, in each supported ``legacy_format``,
    and check the saved files and reloaded special tokens agree.

    The three phases share one save/compare/reload procedure, factored into
    ``_check_save_and_reload`` (the original repeated it inline three times).
    """
    self.tokenizers_list += (self.rust_tokenizer_class, "hf-internal-testing/llama-tokenizer", {})
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
            tokenizer_r = self.get_rust_tokenizer(pretrained_name, **kwargs)
            tokenizer_p = self.get_tokenizer(pretrained_name, **kwargs)

            # Default save: fast writes tokenizer.json on top of the legacy files.
            self._check_save_and_reload(tokenizer_r, tokenizer_p, legacy_format=None)
            # legacy_format=True: fast saves exactly the same files as slow.
            self._check_save_and_reload(tokenizer_r, tokenizer_p, legacy_format=True)
            # legacy_format=False: fast saves only the tokenizer.json serialization.
            self._check_save_and_reload(tokenizer_r, tokenizer_p, legacy_format=False)

def _check_save_and_reload(self, tokenizer_r, tokenizer_p, legacy_format):
    """Save both tokenizers into a fresh temp dir, compare the produced files
    according to ``legacy_format`` (None = default save), then reload and check
    every slow special token has a counterpart on the fast tokenizer.

    The temp dir is removed even when an assertion fails (the original leaked it).
    """
    tmpdirname2 = tempfile.mkdtemp()
    try:
        if legacy_format is None:
            tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
        else:
            tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=legacy_format)
        tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

        if legacy_format is None:
            # Checks it save with the same files + the tokenizer.json file for the fast one
            self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
            tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
            self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
        elif legacy_format:
            # Checks it save with the same files
            self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
        else:
            # Checks it saved the tokenizer.json file
            self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

        # Checks everything loads correctly in the same way
        tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
        tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

        # Check special tokens are set accordingly on Rust and Python
        for key in tokenizer_pp.special_tokens_map:
            self.assertTrue(hasattr(tokenizer_rp, key))
    finally:
        shutil.rmtree(tmpdirname2)
||||
@require_torch
def test_batch_tokenization(self):
    """Batch-encode two texts with truncation to ``max_length=3`` and check the tensor shapes."""
    if not self.test_seq2seq:
        self.skipTest(reason="test_seq2seq is set to False")

    for tokenizer in self.get_tokenizers():
        with self.subTest(f"{tokenizer.__class__.__name__}"):
            # Longer text that will definitely require truncation.
            text = [
                " UN Chief Says There Is No Military Solution in Syria",
                " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"
                " Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"
                " will only worsen the violence and misery for millions of people.",
            ]
            try:
                batch = tokenizer(text=text, max_length=3, return_tensors="pt")
            except NotImplementedError:
                self.skipTest(reason="Encountered NotImplementedError when calling tokenizer")
            self.assertEqual(batch.input_ids.shape[1], 3)

            # max_target_length will default to max_length if not specified
            batch = tokenizer(text, max_length=3, return_tensors="pt")
            self.assertEqual(batch.input_ids.shape[1], 3)

            batch_encoder_only = tokenizer(text=text, max_length=3, return_tensors="pt")
            self.assertEqual(batch_encoder_only.input_ids.shape[1], 3)
            self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3)
            self.assertNotIn("decoder_input_ids", batch_encoder_only)
||||
@unittest.skip(reason="Unfortunately way too slow to build a BPE with SentencePiece.")
def test_save_slow_from_fast_and_reload_fast(self):
    """Intentionally skipped: rebuilding the BPE merge list with SentencePiece is too slow."""
||||
def test_special_tokens_initialization(self):
    """Additional special tokens passed at init must survive encoding for the fast,
    slow, and converted tokenizers, and all three must agree."""
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
            added_tokens = [AddedToken("<special>", lstrip=True)]

            tokenizer_r = self.get_rust_tokenizer(
                pretrained_name, additional_special_tokens=added_tokens, **kwargs
            )
            r_output = tokenizer_r.encode("Hey this is a <special> token")
            special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0]
            self.assertIn(special_token_id, r_output)

            if not self.test_slow_tokenizer:
                continue

            tokenizer_cr = self.get_rust_tokenizer(
                pretrained_name,
                additional_special_tokens=added_tokens,
                **kwargs,  # , from_slow=True <- unfortunately too slow to convert
            )
            tokenizer_p = self.tokenizer_class.from_pretrained(
                pretrained_name, additional_special_tokens=added_tokens, **kwargs
            )

            p_output = tokenizer_p.encode("Hey this is a <special> token")
            cr_output = tokenizer_cr.encode("Hey this is a <special> token")

            self.assertEqual(p_output, r_output)
            self.assertEqual(cr_output, r_output)
            self.assertIn(special_token_id, p_output)
            self.assertIn(special_token_id, cr_output)
||||
@slow
def test_tokenizer_integration(self):
    """Pinned integration check against a fixed revision of the reference Llama tokenizer."""
    expected_encoding = {'input_ids': [[1, 4103, 689, 414, 313, 24784, 368, 2998, 408, 282, 3637, 25350, 29899, 9067, 414, 322, 282, 3637, 25350, 29899, 1457, 3018, 1312, 29899, 2151, 29897, 8128, 2498, 29899, 15503, 4220, 6956, 1973, 313, 13635, 29911, 29892, 402, 7982, 29899, 29906, 29892, 1528, 13635, 29911, 29874, 29892, 1060, 26369, 29892, 6652, 309, 29933, 814, 29892, 1060, 29931, 6779, 11410, 363, 18385, 17088, 7634, 11235, 313, 25103, 29965, 29897, 322, 18385, 17088, 28203, 313, 25103, 29954, 29897, 411, 975, 29871, 29941, 29906, 29974, 758, 3018, 1312, 4733, 297, 29871, 29896, 29900, 29900, 29974, 10276, 322, 6483, 1006, 3372, 3097, 1546, 435, 1165, 29892, 10772, 29911, 25350, 322, 323, 6073, 17907, 29889], [1, 350, 20161, 338, 8688, 304, 758, 29899, 14968, 6483, 21000, 8684, 284, 22540, 515, 443, 29880, 24025, 1426, 491, 14002, 368, 4195, 292, 373, 1716, 2175, 322, 1492, 3030, 297, 599, 15359, 29889], [1, 450, 4996, 17354, 1701, 29916, 432, 17204, 975, 278, 17366, 11203, 29889]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # fmt: skip

    self.tokenizer_integration_test_util(
        expected_encoding=expected_encoding,
        model_name="hf-internal-testing/llama-tokenizer",
        revision="0984d03108b1a041ed679bd253b6519b7e1a4778",
        padding=False,
    )
||||
def test_picklable(self):
    """A slow tokenizer built from a vocab file must survive a pickle round trip."""
    with tempfile.NamedTemporaryFile() as f:
        shutil.copyfile(SAMPLE_VOCAB, f.name)
        tokenizer = LlamaTokenizer(f.name, keep_accents=True)
        # Round-trip: any failure here raises and fails the test.
        pickle.loads(pickle.dumps(tokenizer))
||||
@unittest.skip(reason="worker 'gw4' crashed on CI, passing locally.")
def test_pickle_subword_regularization_tokenizer(self):
    """Intentionally skipped: crashes a pytest-xdist worker on CI but passes locally."""
||||
@unittest.skip(reason="worker 'gw4' crashed on CI, passing locally.")
def test_subword_regularization_tokenizer(self):
    """Intentionally skipped: crashes a pytest-xdist worker on CI but passes locally."""
||||
def test_add_prefix_space(self):
    """``add_prefix_space`` must behave identically for the slow and fast tokenizers,
    both with and without the prefix space."""
    pretrained_name = "hf-internal-testing/llama-tokenizer-non-normalized"
    inputs = "Hey how are you doing"
    EXPECTED_WITH_SPACE = [1, 18637, 920, 526, 366, 2599]
    EXPECTED_WO_SPACE = [1, 29950, 1032, 920, 526, 366, 2599]

    def check(add_prefix_space, expected_ids, expected_tokens):
        # Encode/tokenize/decode must agree with the expectation and across implementations.
        slow_ = self.get_tokenizer(pretrained_name, add_prefix_space=add_prefix_space, legacy=False)
        fast_ = self.get_rust_tokenizer(pretrained_name, add_prefix_space=add_prefix_space, legacy=False)
        self.assertEqual(slow_.encode(inputs), expected_ids)
        self.assertEqual(slow_.encode(inputs), fast_.encode(inputs))
        self.assertEqual(slow_.tokenize(inputs), expected_tokens)
        self.assertEqual(slow_.decode(expected_ids, skip_special_tokens=True), inputs)
        self.assertEqual(
            slow_.decode(expected_ids, skip_special_tokens=True),
            fast_.decode(expected_ids, skip_special_tokens=True),
        )

    # Without the prefix space "Hey" is split as "H" + "ey".
    check(False, EXPECTED_WO_SPACE, ["H", "ey", "▁how", "▁are", "▁you", "▁doing"])
    # With it, "Hey" becomes the single piece "▁Hey".
    check(True, EXPECTED_WITH_SPACE, ["▁Hey", "▁how", "▁are", "▁you", "▁doing"])
||||
def test_load_tokenizer_with_model_file_only(self):
    """Both tokenizer classes must load from a directory containing only ``tokenizer.model``."""
    expected_ids = [1, 910, 338, 263, 1243]
    with tempfile.TemporaryDirectory() as tmp_dir:
        hf_hub_download(repo_id="huggyllama/llama-7b", filename="tokenizer.model", local_dir=tmp_dir)
        # Fast first, then slow — both must produce the same encoding.
        for tokenizer_cls in (self.rust_tokenizer_class, self.tokenizer_class):
            tokenizer = tokenizer_cls.from_pretrained(tmp_dir)
            self.assertEqual(tokenizer.encode("This is a test"), expected_ids)
||||
@require_torch
@require_sentencepiece
@require_tokenizers
class LlamaIntegrationTest(unittest.TestCase):
    """Integration tests comparing the slow (SentencePiece) and fast (tokenizers)
    Llama tokenizers on real checkpoints.

    Fixes vs the previous version: a leftover debug ``print`` was removed from
    ``test_fast_special_tokens``, ``setUpClass`` no longer returns ``cls``
    (unittest ignores the return value), and string literals whose runs of
    spaces were collapsed by a bad paste were reconstructed from the asserted
    token ids (e.g. id 259 == "▁▁" implies a double space).
    """

    @classmethod
    def setUpClass(cls):
        # Loading the checkpoints is expensive; share one slow and one fast
        # tokenizer across every test in the class.
        checkpoint_name = "hf-internal-testing/llama-tokenizer-non-normalized"
        cls.tokenizer: LlamaTokenizer = LlamaTokenizer.from_pretrained(checkpoint_name)
        cls.rust_tokenizer = LlamaTokenizerFast.from_pretrained(checkpoint_name)

    @require_torch
    def integration_tests(self):
        # NOTE: no "test_" prefix — not collected by the runner; kept as a manual check.
        inputs = self.tokenizer(
            ["The following string should be properly encoded: Hello.", "But ird and ปี ird   ด"],
            return_tensors="pt",
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                "input_ids": [
                    [1, 450, 1494, 1347, 881, 367, 6284, 18511, 29901, 15043, 29889],
                    [1, 1205, 29871, 1823, 322, 29871, 31010, 30691, 1678, 1823, 1678, 30718],
                ],
                "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]],
            },
        )

    def test_fast_special_tokens(self):
        """``add_eos_token`` must be toggleable at runtime and at load time,
        with identical results for the slow and fast tokenizers."""
        slow_tokenizer = self.tokenizer
        fast_tokenizer = self.rust_tokenizer
        slow = slow_tokenizer.encode("A sample test", add_special_tokens=True)
        assert slow == [1, 319, 4559, 1243]

        fast_tokenizer.add_eos_token = False
        fast = fast_tokenizer.encode("A sample test", add_special_tokens=True)
        assert fast == [1, 319, 4559, 1243]

        fast_tokenizer.add_eos_token = True
        fast = fast_tokenizer.encode("A sample test", add_special_tokens=True)
        assert fast == [1, 319, 4559, 1243, 2]

        slow_tokenizer.add_eos_token = True
        slow = slow_tokenizer.encode("A sample test", add_special_tokens=True)
        assert slow == [1, 319, 4559, 1243, 2]

        fast_tokenizer = LlamaTokenizerFast.from_pretrained(
            "hf-internal-testing/llama-tokenizer", add_eos_token=True, add_bos_token=False
        )
        fast = fast_tokenizer.encode("A sample test", add_special_tokens=True)
        assert fast == [319, 4559, 1243, 2]

        slow_tokenizer = LlamaTokenizer.from_pretrained(
            "hf-internal-testing/llama-tokenizer", add_eos_token=True, add_bos_token=False
        )
        slow = slow_tokenizer.encode("A sample test", add_special_tokens=True)
        assert slow == [319, 4559, 1243, 2]

        # Restore the shared class-level tokenizers for the remaining tests.
        self.tokenizer.add_eos_token = False
        self.rust_tokenizer.add_eos_token = False

    # See internal discussion: https://huggingface.slack.com/archives/C01NE71C4F7/p1750680376085749?thread_ts=1750676268.233309&cid=C01NE71C4F7
    @unittest.skip("failing, won't fix")
    @slow
    def test_conversion(self):
        """Converting the slow tokenizer must reproduce the fast serialization byte-for-byte."""
        # This is excruciatingly slow since it has to recreate the entire merge
        # list from the original vocabulary in spm
        self.rust_tokenizer.save_pretrained("./out")
        with tempfile.TemporaryDirectory() as dirname:
            self.rust_tokenizer.save_pretrained(dirname)

            with open(os.path.join(dirname, "tokenizer.json")) as f:
                old_serialized = f.read()

        new_tokenizer = convert_slow_tokenizer(self.tokenizer)
        with tempfile.NamedTemporaryFile() as f:
            new_tokenizer.save(f.name)
            # Re-opening since `f` is in bytes.
            new_serialized = open(f.name).read()
            with open("out_tokenizer.json", "w") as g:
                g.write(new_serialized)

            self.assertEqual(old_serialized, new_serialized)

    def test_simple_encode_decode(self):
        """Slow and fast tokenizers must agree on encode/decode, including
        byte-fallback and inner/leading whitespace handling."""
        pyth_tokenizer = self.tokenizer
        rust_tokenizer = self.rust_tokenizer

        self.assertEqual(pyth_tokenizer.encode("This is a test"), [1, 910, 338, 263, 1243])
        self.assertEqual(rust_tokenizer.encode("This is a test"), [1, 910, 338, 263, 1243])
        self.assertEqual(pyth_tokenizer.decode([1, 910, 338, 263, 1243], skip_special_tokens=True), "This is a test")
        self.assertEqual(rust_tokenizer.decode([1, 910, 338, 263, 1243], skip_special_tokens=True), "This is a test")

        # bytefallback showcase
        self.assertEqual(pyth_tokenizer.encode("生活的真谛是"), [1, 29871, 30486, 31704, 30210, 30848, 235, 179, 158, 30392])  # fmt: skip
        self.assertEqual(rust_tokenizer.encode("生活的真谛是"), [1, 29871, 30486, 31704, 30210, 30848, 235, 179, 158, 30392])  # fmt: skip
        self.assertEqual(
            pyth_tokenizer.decode(
                [1, 29871, 30486, 31704, 30210, 30848, 235, 179, 158, 30392], skip_special_tokens=True
            ),
            "生活的真谛是",
        )
        self.assertEqual(
            rust_tokenizer.decode(
                [1, 29871, 30486, 31704, 30210, 30848, 235, 179, 158, 30392], skip_special_tokens=True
            ),
            "生活的真谛是",
        )

        # Inner spaces showcase (id 29871 == "▁", id 259 == "▁▁")
        self.assertEqual(pyth_tokenizer.encode("Hi Hello"), [1, 6324, 29871, 15043])
        self.assertEqual(rust_tokenizer.encode("Hi Hello"), [1, 6324, 29871, 15043])
        self.assertEqual(pyth_tokenizer.decode([1, 6324, 29871, 15043], skip_special_tokens=True), "Hi Hello")
        self.assertEqual(rust_tokenizer.decode([1, 6324, 29871, 15043], skip_special_tokens=True), "Hi Hello")

        self.assertEqual(pyth_tokenizer.encode("Hi  Hello"), [1, 6324, 259, 15043])
        self.assertEqual(rust_tokenizer.encode("Hi  Hello"), [1, 6324, 259, 15043])
        self.assertEqual(pyth_tokenizer.decode([1, 6324, 259, 15043], skip_special_tokens=True), "Hi  Hello")
        self.assertEqual(rust_tokenizer.decode([1, 6324, 259, 15043], skip_special_tokens=True), "Hi  Hello")

        self.assertEqual(pyth_tokenizer.encode(""), [1])
        self.assertEqual(rust_tokenizer.encode(""), [1])

        self.assertEqual(pyth_tokenizer.encode(" "), [1, 259])
        self.assertEqual(rust_tokenizer.encode(" "), [1, 259])

        self.assertEqual(pyth_tokenizer.encode("  "), [1, 1678])
        self.assertEqual(rust_tokenizer.encode("  "), [1, 1678])

        self.assertEqual(pyth_tokenizer.encode(" Hello"), [1, 29871, 15043])
        self.assertEqual(rust_tokenizer.encode(" Hello"), [1, 29871, 15043])

    def test_no_differences_showcase(self):
        """Whitespace-only and special-token-only inputs must encode identically."""
        pyth_tokenizer = self.tokenizer
        rust_tokenizer = self.rust_tokenizer
        self.assertEqual(pyth_tokenizer.encode(""), [1])
        self.assertEqual(rust_tokenizer.encode(""), [1])

        self.assertEqual(pyth_tokenizer.encode(" "), [1, 259])
        self.assertEqual(rust_tokenizer.encode(" "), [1, 259])

        self.assertEqual(pyth_tokenizer.encode("  "), [1, 1678])
        self.assertEqual(rust_tokenizer.encode("  "), [1, 1678])

        self.assertEqual(pyth_tokenizer.encode(" Hello"), [1, 29871, 15043])
        self.assertEqual(rust_tokenizer.encode(" Hello"), [1, 29871, 15043])

        self.assertEqual(pyth_tokenizer.encode("<s>"), [1, 1])
        self.assertEqual(rust_tokenizer.encode("<s>"), [1, 1])

    def test_no_differences_decode(self):
        """Decoding single ids and RTL text must be identical across implementations."""
        pyth_tokenizer = self.tokenizer
        rust_tokenizer = self.rust_tokenizer

        self.assertEqual(pyth_tokenizer.decode([869]), ".")
        self.assertEqual(rust_tokenizer.decode([869]), ".")

        self.assertEqual(pyth_tokenizer.decode([30112, 869]), "ا .")
        self.assertEqual(rust_tokenizer.decode([30112, 869]), "ا .")

    def test_no_differences_special_tokens(self):
        """BOS handling for empty input and an explicit ``<s>`` must match."""
        pyth_tokenizer = self.tokenizer
        rust_tokenizer = self.rust_tokenizer
        self.assertEqual(pyth_tokenizer.encode(""), [1])
        self.assertEqual(rust_tokenizer.encode(""), [1])

        self.assertEqual(pyth_tokenizer.encode("<s>"), [1, 1])
        self.assertEqual(rust_tokenizer.encode("<s>"), [1, 1])

    @unittest.skipIf(
        os.getenv("RUN_TOKENIZER_INTEGRATION", "0") == "0",
        "RUN_TOKENIZER_INTEGRATION=1 to run tokenizer integration tests",
    )
    def test_integration_test_xnli(self):
        """Exhaustive encode/decode agreement over two real datasets (opt-in, very slow)."""
        import tqdm

        pyth_tokenizer = self.tokenizer
        rust_tokenizer = self.rust_tokenizer

        dataset = load_dataset("google/code_x_glue_ct_code_to_text", "go")
        for item in tqdm.tqdm(dataset["validation"]):
            string = item["code"]
            encoded1 = pyth_tokenizer.encode(string)
            encoded2 = rust_tokenizer.encode(string)

            self.assertEqual(encoded1, encoded2)

            decoded1 = pyth_tokenizer.decode(encoded1, skip_special_tokens=True)
            decoded2 = rust_tokenizer.decode(encoded2, skip_special_tokens=True)

            self.assertEqual(decoded1, decoded2)

        dataset = load_dataset("facebook/xnli", "all_languages")

        for item in tqdm.tqdm(dataset["train"]):
            for string in item["premise"].values():
                encoded1 = pyth_tokenizer.encode(string)
                encoded2 = rust_tokenizer.encode(string)

                self.assertEqual(encoded1, encoded2)

                decoded1 = pyth_tokenizer.decode(encoded1, skip_special_tokens=True)
                decoded2 = rust_tokenizer.decode(encoded2, skip_special_tokens=True)

                self.assertEqual(decoded1, decoded2)

    def test_special_token_special_word(self):
        """Added tokens with lstrip/rstrip must split adjacent words correctly and
        decode without gaining or losing spaces."""
        # the word inform should be split as ['in', 'form']
        tokenizer = LlamaTokenizerFast.from_pretrained("huggyllama/llama-7b", legacy=False, from_slow=True)
        tokenizer.add_tokens([AddedToken("<REPR_END>", rstrip=True, lstrip=True)], special_tokens=False)

        # Seven spaces before the final "." — tokenized as "▁▁▁▁▁▁" + "▁.".
        example_inputs = tokenizer.tokenize("<REPR_END>inform<s>. Hey.       .")
        self.assertEqual(example_inputs, ["<REPR_END>", "in", "form", "<s>", ".", "▁Hey", ".", "▁▁▁▁▁▁", "▁."])

        # Make sure dummy space is added if it is indeed the first word
        example_inputs = tokenizer.tokenize("inform<s>. Hey.       .")
        self.assertEqual(example_inputs, ["▁inform", "<s>", ".", "▁Hey", ".", "▁▁▁▁▁▁", "▁."])
        out1 = tokenizer.decode(
            tokenizer.encode("<REPR_END>inform", add_special_tokens=False), spaces_between_special_tokens=False
        )
        self.assertEqual(out1, "<REPR_END>inform")
        out2 = tokenizer.decode(
            tokenizer.encode("<REPR_END>inform", add_special_tokens=False), spaces_between_special_tokens=True
        )
        # decoding strips the added prefix space.
        self.assertEqual(out2, "<REPR_END>inform")
        input_ids = tokenizer.encode("<REPR_END>inform", add_special_tokens=False)
        self.assertEqual(input_ids, [32000, 262, 689])  # 29871 is the spiece underline, '▁' added as it should

        out2 = tokenizer.decode(
            tokenizer.encode(" <REPR_END>inform", add_special_tokens=False), spaces_between_special_tokens=False
        )
        # TODO @ArthurZ currently we strip left and right, so this will not keep the spaces
        self.assertEqual(out2, "<REPR_END>inform")

        ### Let's make sure decoding does not add extra spaces here and there
        # TODO @ArthurZ this should be affected by the lstrip/rstrip/single word /normalize refactoring
        # Since currently we always strip left and right of the token, results are as such
        input_ids = tokenizer.encode("<s> Hello<s>how", add_special_tokens=False)
        self.assertEqual(input_ids, [1, 15043, 1, 3525])
        tokens = tokenizer.tokenize("<s> Hello<s>how", add_special_tokens=False)
        self.assertEqual(tokens, ["<s>", "▁Hello", "<s>", "how"])
        decoded_tokens = tokenizer.decode(input_ids)
        self.assertEqual(decoded_tokens, "<s> Hello<s>how")

        # Let's make sure that if there are any spaces, we don't remove them!
        input_ids = tokenizer.encode(" <s> Hello<s> how", add_special_tokens=False)
        self.assertEqual(input_ids, [29871, 1, 15043, 1, 920])
        tokens = tokenizer.tokenize(" <s> Hello<s> how", add_special_tokens=False)
        self.assertEqual(tokens, ["▁", "<s>", "▁Hello", "<s>", "▁how"])
        decoded_tokens = tokenizer.decode(input_ids)
        self.assertEqual(decoded_tokens, "<s> Hello<s> how")

        # Let's make sure the space is preserved
        input_ids = tokenizer.encode("hello", add_special_tokens=True)
        self.assertEqual(input_ids, [1, 22172])
        tokens = tokenizer.tokenize("hello")
        self.assertEqual(tokens, ["▁hello"])
        decoded_tokens = tokenizer.decode(input_ids)
        self.assertEqual(decoded_tokens, "<s> hello")

        input_ids = tokenizer.encode("hello", add_special_tokens=False)
        self.assertEqual(input_ids, [22172])
        decoded_tokens = tokenizer.decode(input_ids)
        self.assertEqual(decoded_tokens, "hello")

    def test_no_prefix_space(self):
        """With ``add_prefix_space=False`` no dummy space is prepended, and
        leading spaces in the input are preserved on decode."""
        tokenizer_no_prefix_space = LlamaTokenizerFast.from_pretrained("huggyllama/llama-7b", add_prefix_space=False)
        no_prefix_space_tokens = tokenizer_no_prefix_space.tokenize("Hey")
        self.assertEqual(no_prefix_space_tokens, ["H", "ey"])

        tokenizer = LlamaTokenizerFast.from_pretrained(
            "huggyllama/llama-7b", legacy=False, from_slow=True, add_prefix_space=False
        )
        tokenizer.add_tokens([AddedToken("<REPR_END>", rstrip=True, lstrip=True)], special_tokens=False)

        example_inputs = tokenizer.tokenize("<REPR_END>inform<s>. Hey.       .")
        self.assertEqual(example_inputs, ["<REPR_END>", "in", "form", "<s>", ".", "▁Hey", ".", "▁▁▁▁▁▁", "▁."])

        # Make sure dummy space is added if it is indeed the first word
        example_inputs = tokenizer.tokenize("inform<s>. Hey.       .")
        self.assertEqual(example_inputs, ["in", "form", "<s>", ".", "▁Hey", ".", "▁▁▁▁▁▁", "▁."])
        out1 = tokenizer.decode(
            tokenizer.encode("<REPR_END>inform", add_special_tokens=False), spaces_between_special_tokens=False
        )
        self.assertEqual(out1, "<REPR_END>inform")
        out2 = tokenizer.decode(
            tokenizer.encode("<REPR_END>inform", add_special_tokens=False), spaces_between_special_tokens=True
        )
        # decoding strips the added prefix space.
        self.assertEqual(out2, "<REPR_END>inform")
        input_ids = tokenizer.encode("<REPR_END>inform", add_special_tokens=False)
        self.assertEqual(input_ids, [32000, 262, 689])  # 29871 is the spiece underline, '▁' added as it should

        out2 = tokenizer.decode(
            tokenizer.encode(" <REPR_END>inform", add_special_tokens=False), spaces_between_special_tokens=False
        )
        self.assertEqual(out2, "<REPR_END>inform")

        input_ids = tokenizer.encode("<s> Hello<s>how", add_special_tokens=False)
        self.assertEqual(input_ids, [1, 15043, 1, 3525])
        tokens = tokenizer.tokenize("<s> Hello<s>how", add_special_tokens=False)
        self.assertEqual(tokens, ["<s>", "▁Hello", "<s>", "how"])
        decoded_tokens = tokenizer.decode(input_ids)
        self.assertEqual(decoded_tokens, "<s> Hello<s>how")

        # Let's make sure that if there are any spaces, we don't remove them!
        input_ids = tokenizer.encode(" <s> Hello<s> how", add_special_tokens=False)
        self.assertEqual(input_ids, [29871, 1, 15043, 1, 920])
        tokens = tokenizer.tokenize(" <s> Hello<s> how", add_special_tokens=False)
        self.assertEqual(tokens, ["▁", "<s>", "▁Hello", "<s>", "▁how"])
        decoded_tokens = tokenizer.decode(input_ids)
        self.assertEqual(decoded_tokens, " <s> Hello<s> how")

        # Let's make sure the space is preserved
        input_ids = tokenizer.encode("hello", add_special_tokens=True)
        self.assertEqual(input_ids, [1, 12199])
        tokens = tokenizer.tokenize("hello")
        self.assertEqual(tokens, ["hello"])
        decoded_tokens = tokenizer.decode(input_ids)
        self.assertEqual(decoded_tokens, "<s>hello")

        input_ids = tokenizer.encode("hello", add_special_tokens=False)
        self.assertEqual(input_ids, [12199])
        decoded_tokens = tokenizer.decode(input_ids)
        self.assertEqual(decoded_tokens, "hello")

    def test_some_edge_cases(self):
        """Special-token matching must win over raw sp_model segmentation, and
        whitespace-only inputs must follow the sp_model with no dummy prefix."""
        tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", legacy=False)

        sp_tokens = tokenizer.sp_model.encode("<s>>", out_type=str)
        self.assertEqual(sp_tokens, ["<", "s", ">>"])
        tokens = tokenizer.tokenize("<s>>")
        self.assertNotEqual(sp_tokens, tokens)
        self.assertEqual(tokens, ["<s>", ">"])

        tokens = tokenizer.tokenize("")
        self.assertEqual(tokens, [])
        self.assertEqual(tokens, tokenizer.sp_model.encode("", out_type=str))

        tokens = tokenizer.tokenize(" ")
        self.assertEqual(tokens, ["▁▁"])
        # a dummy prefix space is not added by the sp_model as it was de-activated
        self.assertEqual(tokens, tokenizer.sp_model.encode("▁▁", out_type=str))

        tokens = tokenizer.tokenize("▁")
        self.assertEqual(tokens, ["▁▁"])
        # a dummy prefix space is not added by the sp_model as it was de-activated
        self.assertEqual(tokens, tokenizer.sp_model.encode("▁▁", out_type=str))

        tokens = tokenizer.tokenize(" ▁")
        self.assertEqual(tokens, ["▁▁▁"])
        # a dummy prefix space is not added by the sp_model as it was de-activated
        self.assertEqual(tokens, tokenizer.sp_model.encode("▁▁▁", out_type=str))

    def test_fast_post_processor(self):
        """The fast tokenizer must reject post-processor configs that require a
        BOS/EOS token which was set to ``None``."""
        tokenizer = LlamaTokenizerFast(
            SAMPLE_VOCAB, eos_token=None, bos_token=None, add_bos_token=False, add_eos_token=False
        )
        tokenizer.encode(" Hey ")

        with self.assertRaises(ValueError):
            tokenizer = LlamaTokenizerFast(
                SAMPLE_VOCAB, bos_token=None, eos_token="<s>", add_bos_token=True, add_eos_token=False
            )
        with self.assertRaises(ValueError):
            tokenizer = LlamaTokenizerFast(SAMPLE_VOCAB, eos_token=None, add_bos_token=True, add_eos_token=True)

    @require_jinja
    def test_tokenization_for_chat(self):
        """Pin the chat-template output for system/user/assistant conversations."""
        tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", legacy=False)

        test_chats = [
            [{"role": "system", "content": "You are a helpful chatbot."}, {"role": "user", "content": "Hello!"}],
            [
                {"role": "system", "content": "You are a helpful chatbot."},
                {"role": "user", "content": "Hello!"},
                {"role": "assistant", "content": "Nice to meet you."},
            ],
            [{"role": "user", "content": "Hello!"}],
        ]
        # Matt: The third test case tests the default system message, but if this is ever changed in the
        # class/repo code then that test will fail, and the case will need to be updated.
        tokenized_chats = [tokenizer.apply_chat_template(test_chat) for test_chat in test_chats]
        # fmt: off
        expected_tokens = [
            [1, 29961, 25580, 29962, 3532, 14816, 29903, 6778, 13, 3492, 526, 263, 8444, 13563, 7451, 29889, 13, 29966, 829, 14816, 29903, 6778, 13, 13, 10994, 29991, 518, 29914, 25580, 29962],
            [1, 29961, 25580, 29962, 3532, 14816, 29903, 6778, 13, 3492, 526, 263, 8444, 13563, 7451, 29889, 13, 29966, 829, 14816, 29903, 6778, 13, 13, 10994, 29991, 518, 29914, 25580, 29962, 20103, 304, 5870, 366, 29889, 29871, 2],
            [1, 29961, 25580, 29962, 15043, 29991, 518, 29914, 25580, 29962]
        ]
        # fmt: on
        for tokenized_chat, expected_tokens in zip(tokenized_chats, expected_tokens):
            self.assertListEqual(tokenized_chat, expected_tokens)
||||
@require_sentencepiece
|
||||
@require_tokenizers
|
||||
class CommonSpmIntegrationTests(unittest.TestCase):
|
||||
"""
|
||||
A class that regroups important test to make sure that we properly handle the special tokens.
|
||||
"""
|
||||
|
||||
@classmethod
def setUpClass(cls):
    """Build one shared slow tokenizer (no BOS, non-legacy) with ``"<s>"``
    registered as an additional special token.

    Note: the previous version ended with ``return cls`` — unittest ignores
    the return value of ``setUpClass``, so that statement was removed.
    """
    tokenizer = LlamaTokenizer(SAMPLE_VOCAB, extra_ids=0, add_bos_token=False, legacy=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [AddedToken("<s>", rstrip=False, lstrip=False)]})
    cls.tokenizer = tokenizer
|
||||
def test_add_dummy_prefix(self):
    """tokenize/encode must mirror the sp_model's ``add_dummy_prefix`` normalization."""
    # make sure `'▁'` is prepended, and outputs match sp_model's
    # `sentencepiece.NormalizerSpec.add_dummy_prefix` attribute
    input_ids = self.tokenizer.encode(". Hello")
    self.assertEqual(input_ids, [7, 4, 156, 86, 20])
    sp_encode = self.tokenizer.sp_model.encode(". Hello")
    self.assertEqual(input_ids, [7] + sp_encode)
    self.assertEqual(self.tokenizer.tokenize(". Hello"), ["▁", ".", "▁He", "ll", "o"])

    # Whitespace-only inputs tokenize to nothing, exactly like the raw sp_model.
    for text in ("", " ", "▁"):
        tokens = self.tokenizer.tokenize(text)
        self.assertEqual(tokens, [])
        self.assertEqual(tokens, self.tokenizer.sp_model.encode(text, out_type=str))
def test_remove_extra_whitespaces(self):
|
||||
# make sure the extra spaces are eaten. Since the sample vocab does not have
|
||||
# `______`. sentencepiece.NormalizerSpec.remove_extra_whitespaces attribute is set to False
|
||||
|
||||
input_ids = self.tokenizer.encode(" . Hello")
|
||||
self.assertEqual(input_ids, [7, 4, 156, 86, 20])
|
||||
sp_encode = self.tokenizer.sp_model.encode(" . Hello")
|
||||
self.assertEqual(input_ids, [7] + sp_encode)
|
||||
tokens = self.tokenizer.tokenize(" . Hello")
|
||||
self.assertEqual(tokens, ["▁", ".", "▁He", "ll", "o"])
|
||||
|
||||
# `'▁'` is also a whitespace
|
||||
input_ids = self.tokenizer.encode("▁He is not")
|
||||
self.assertEqual(input_ids, [156, 46, 44])
|
||||
tokens = self.tokenizer.tokenize("▁He is not")
|
||||
sp_encode = [
|
||||
self.tokenizer.sp_model.piece_to_id("▁He"),
|
||||
self.tokenizer.sp_model.piece_to_id("▁is"),
|
||||
self.tokenizer.sp_model.piece_to_id("▁not"),
|
||||
]
|
||||
self.assertEqual(input_ids, sp_encode)
|
||||
self.assertEqual(tokens, ["▁He", "▁is", "▁not"]) # no extra space added
|
||||
|
||||
input_ids = self.tokenizer.encode("▁He is not<s> ▁He")
|
||||
self.assertEqual(input_ids, [156, 46, 44, 1, 156])
|
||||
tokens = self.tokenizer.tokenize("▁He is not<s> ▁He")
|
||||
self.assertEqual(tokens, ["▁He", "▁is", "▁not", "<s>", "▁He"]) # spaces are eaten by spm + our strip
|
||||
# make sure that the output after the extra id is the same as if
|
||||
# extra_id was not there
|
||||
input_ids = self.tokenizer.encode("▁He is not ▁He")
|
||||
self.assertEqual(input_ids, [156, 46, 44, 156])
|
||||
tokens = self.tokenizer.tokenize("▁He is not ▁He")
|
||||
self.assertEqual(tokens, ["▁He", "▁is", "▁not", "▁He"]) # spaces are eaten by spm even if not start
|
||||
|
||||
def test_character_after_special_token(self):
|
||||
# Make sure that `tokenizer.tokenize` is similar to
|
||||
# adding the equivalent special token to the vocab
|
||||
input_ids = self.tokenizer.encode("Hey <s>I")
|
||||
self.assertEqual(input_ids, [156, 30, 1, 100])
|
||||
sp_encode = self.tokenizer.sp_model.encode("Hey .I")
|
||||
# the last token should be 100
|
||||
self.assertEqual(input_ids[-1], sp_encode[-1])
|
||||
tokens = self.tokenizer.tokenize("<s>I")
|
||||
self.assertEqual(tokens, ["<s>", "I"])
|
||||
|
||||
input_ids = self.tokenizer.encode("Hello, <s>,")
|
||||
self.assertEqual(input_ids, [156, 86, 20, 3, 1, 3])
|
||||
tokens = self.tokenizer.tokenize("Hello, <s>,")
|
||||
self.assertEqual(tokens, ["▁He", "ll", "o", ",", "<s>", ","])
|
||||
|
||||
def test_special_tokens_strip(self):
|
||||
input_ids = self.tokenizer.encode(" <s> ,")
|
||||
self.assertEqual(input_ids, [1, 7, 3])
|
||||
tokens = self.tokenizer.tokenize(" <s> ,")
|
||||
# spaces are eaten by rstrip / lstrip + spm sp_model.encode(" ") = []
|
||||
self.assertEqual(tokens, ["<s>", "▁", ","])
|
||||
|
||||
input_ids = self.tokenizer.encode("No <s> ▁He")
|
||||
self.assertEqual(input_ids, [284, 1, 156])
|
||||
tokens = self.tokenizer.tokenize("No <s> ▁He")
|
||||
self.assertEqual(tokens, ["▁No", "<s>", "▁He"]) # spaces are eaten by rstrip / lstrip
|
||||
|
||||
|
||||
@require_tiktoken
@require_read_token
class TikTokenIntegrationTests(unittest.TestCase):
    """
    A class that regroups important test to make sure that we properly handle the special tokens.
    """

    def test_tiktoken_llama(self):
        """Load a tiktoken-based Llama-3 tokenizer three ways (fast class directly,
        AutoTokenizer, AutoTokenizer with `from_slow=True`) and check they all produce
        the same ids, including after a save/reload round-trip."""
        model_path = "hf-internal-testing/llama-3-8b-internal"
        subfolder = "original"
        test_text = "This is a test sentence."
        test_tokens = [128000, 2028, 374, 264, 1296, 11914, 13, 128001]
        num_reserved_special_tokens = 256
        special_tokens = [
            "<|begin_of_text|>",
            "<|end_of_text|>",
            "<|reserved_special_token_0|>",
            "<|reserved_special_token_1|>",
            "<|reserved_special_token_2|>",
            "<|reserved_special_token_3|>",
            "<|start_header_id|>",
            "<|end_header_id|>",
            "<|reserved_special_token_4|>",
            "<|eot_id|>",  # end of turn
            "<|python_tag|>",
        ] + [f"<|reserved_special_token_{i}|>" for i in range(5, num_reserved_special_tokens - 5)]

        tiktoken_tokenizer = PreTrainedTokenizerFast.from_pretrained(
            model_path,
            subfolder=subfolder,
            additional_special_tokens=special_tokens,
            bos_token="<|begin_of_text|>",
            eos_token="<|end_of_text|>",
        )
        tokens = tiktoken_tokenizer.tokenize("<|begin_of_text|> " + test_text)
        self.assertEqual(tokens[0], "<|begin_of_text|>")

        tiktoken_tokenizer = AutoTokenizer.from_pretrained(
            model_path,
            subfolder=subfolder,
            legacy=False,
            additional_special_tokens=special_tokens,
            bos_token="<|begin_of_text|>",
            eos_token="<|end_of_text|>",
            add_bos_token=True,
            add_eos_token=True,
        )
        self.assertIsInstance(tiktoken_tokenizer, PreTrainedTokenizerFast)

        tokens = tiktoken_tokenizer.encode(test_text, add_special_tokens=True)
        self.assertEqual(tokens, test_tokens)

        # Round-trip through save_pretrained/from_pretrained. TemporaryDirectory
        # guarantees cleanup even if an assertion inside the block fails (the old
        # mkdtemp + rmtree pattern leaked the directory on failure).
        with tempfile.TemporaryDirectory() as tmpdirname:
            tiktoken_tokenizer.save_pretrained(tmpdirname)
            tokenizer_reload = AutoTokenizer.from_pretrained(tmpdirname)

            self.assertIsInstance(tokenizer_reload, PreTrainedTokenizerFast)
            tokens = tokenizer_reload.encode(test_text, add_special_tokens=True)
            self.assertEqual(tokens, test_tokens)

        tiktoken_tokenizer = AutoTokenizer.from_pretrained(
            model_path,
            subfolder=subfolder,
            additional_special_tokens=special_tokens,
            bos_token="<|begin_of_text|>",
            eos_token="<|end_of_text|>",
            from_slow=True,
            add_bos_token=True,
            add_eos_token=True,
        )
        tokens = tiktoken_tokenizer.encode(test_text, add_special_tokens=True)
        self.assertEqual(tokens, test_tokens)
Reference in New Issue
Block a user