init

This commit is contained in:

0	transformers/tests/models/codegen/__init__.py	Normal file
492	transformers/tests/models/codegen/test_modeling_codegen.py	Normal file

@@ -0,0 +1,492 @@
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import unittest
from functools import cached_property

from transformers import CodeGenConfig, is_torch_available
from transformers.testing_utils import backend_manual_seed, require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import AutoTokenizer, CodeGenForCausalLM, CodeGenModel


class CodeGenModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_input_mask=True,
        use_labels=True,
        use_mc_token_ids=True,
        vocab_size=256,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
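        # All three special tokens deliberately reuse the last id of the toy vocab,
        # so the test model needs no embedding rows beyond vocab_size.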

    def get_large_model_config(self):
        return CodeGenConfig.from_pretrained("Salesforce/codegen-2B-mono")

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def get_config(self):
        return CodeGenConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            n_positions=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )
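
    # NOTE: CodeGenConfig follows the GPT-2 naming scheme, so the tester's generic
    # attributes are translated here (hidden_size -> n_embd, num_hidden_layers -> n_layer,
    # num_attention_heads -> n_head, max_position_embeddings -> n_positions);
    # rotary_dim is the number of head dimensions that receive rotary position embeddings.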

    def create_and_check_codegen_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CodeGenModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)

    def create_and_check_codegen_model_past(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CodeGenModel(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, token_type_ids=token_type_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids, token_type_ids=token_type_ids)
        outputs_no_past = model(input_ids, token_type_ids=token_type_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past = outputs.to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        next_token_types = ids_tensor([self.batch_size, 1], self.type_vocab_size)

        # append to next input_ids and token_type_ids
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_token_type_ids = torch.cat([token_type_ids, next_token_types], dim=-1)

        output_from_no_past = model(next_input_ids, token_type_ids=next_token_type_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, token_type_ids=next_token_types, past_key_values=past)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
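
    # The check above is the core KV-cache contract: decoding one new token with
    # `past_key_values` must reproduce, within float tolerance (atol=1e-3), the
    # last position of a full forward pass over the concatenated sequence.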

    def create_and_check_codegen_model_attention_mask_past(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        model = CodeGenModel(config=config)
        model.to(torch_device)
        model.eval()

        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0

        # first forward pass
        output, past = model(input_ids, attention_mask=attn_mask).to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens

        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
            dim=1,
        )

        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
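
    # Here the second half of the prompt is masked out, and one masked position is
    # deliberately overwritten with different tokens before the no-cache pass. The
    # two outputs must still agree: positions with attention_mask == 0 cannot
    # influence the result, with or without a KV cache.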

    def create_and_check_codegen_model_past_large_inputs(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        model = CodeGenModel(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, token_type_ids=token_type_ids, attention_mask=input_mask, use_cache=True)

        output, past = outputs.to_tuple()

        # create hypothetical next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_token_types = ids_tensor([self.batch_size, 3], self.type_vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and token_type_ids
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_token_type_ids = torch.cat([token_type_ids, next_token_types], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids, token_type_ids=next_token_type_ids, attention_mask=next_attention_mask
        )["last_hidden_state"]
        output_from_past = model(
            next_tokens, token_type_ids=next_token_types, attention_mask=next_attention_mask, past_key_values=past
        )["last_hidden_state"]
        self.parent.assertTrue(output_from_past.shape[1] == next_tokens.shape[1])

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
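
    # Same cache contract as above, but with a 3-token continuation: the cached
    # pass must emit exactly one hidden state per new token, matching the last
    # three positions of the full-sequence pass.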

    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CodeGenForCausalLM(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_forward_and_backwards(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False
    ):
        model = CodeGenForCausalLM(config)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()
        model.to(torch_device)

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}

        return config, inputs_dict


@require_torch
class CodeGenModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CodeGenModel, CodeGenForCausalLM) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": CodeGenModel, "text-generation": CodeGenForCausalLM} if is_torch_available() else {}
    )
    fx_compatible = False
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    # special case for DoubleHeads model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        return inputs_dict

    def setUp(self):
        self.model_tester = CodeGenModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CodeGenConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_codegen_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_codegen_model(*config_and_inputs)

    def test_codegen_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_codegen_model_past(*config_and_inputs)

    def test_codegen_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_codegen_model_attention_mask_past(*config_and_inputs)

    def test_codegen_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_codegen_model_past_large_inputs(*config_and_inputs)

    def test_codegen_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_codegen_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    @slow
    def test_batch_generation(self):
        tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
        model = CodeGenForCausalLM.from_pretrained("Salesforce/codegen-350M-mono")
        model.to(torch_device)

        tokenizer.padding_side = "left"

        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id
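
        # Decoder-only models have no dedicated pad token, so batched generate()
        # needs left padding: right padding would leave pad tokens between the
        # prompt and its generated continuation.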

        # use different length sentences to test batching
        sentences = ["def hello_world():", "def greet(name):"]

        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)
        token_type_ids = torch.cat(
            [
                input_ids.new_full((input_ids.shape[0], input_ids.shape[1] - 1), 0),
                input_ids.new_full((input_ids.shape[0], 1), 500),
            ],
            dim=-1,
        )

        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"].to(torch_device),
        )

        outputs_tt = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"].to(torch_device),
            token_type_ids=token_type_ids,
        )

        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        batch_out_sentence_tt = tokenizer.batch_decode(outputs_tt, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            'def hello_world():\n    print("Hello World")\n\nhellow_world()',
            'def greet(name):\n    print(f"Hello {name}")\n\ng',
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertTrue(batch_out_sentence_tt != batch_out_sentence)  # token_type_ids should change output
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    @slow
    def test_model_from_pretrained(self):
        model_name = "Salesforce/codegen-350M-nl"
        model = CodeGenModel.from_pretrained(model_name)
        self.assertIsNotNone(model)


@require_torch
class CodeGenModelLanguageGenerationTest(unittest.TestCase):
    @cached_property
    def cached_tokenizer(self):
        return AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

    @cached_property
    def cached_model(self):
        return CodeGenForCausalLM.from_pretrained("Salesforce/codegen-350M-mono")

    @slow
    def test_lm_generate_codegen(self):
        tokenizer = self.cached_tokenizer
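        # Gradient checkpointing only changes how activations are stored for the
        # backward pass; greedy decoding in the loop below must therefore produce
        # identical text with it enabled or disabled.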
        for checkpointing in [True, False]:
            model = self.cached_model

            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)

            inputs = tokenizer("def hello_world():", return_tensors="pt").to(torch_device)
            expected_output = 'def hello_world():\n    print("Hello World")\n\nhello_world()\n\n'

            output_ids = model.generate(**inputs, do_sample=False)
            output_str = tokenizer.batch_decode(output_ids)[0]

            self.assertEqual(output_str, expected_output)

    @slow
    def test_codegen_sample(self):
        tokenizer = self.cached_tokenizer
        model = self.cached_model
        model.to(torch_device)

        torch.manual_seed(0)
        backend_manual_seed(torch_device, 0)
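        # Seed both the CPU RNG and the accelerator RNG so the sampled outputs
        # below are reproducible against the expected strings.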

        tokenized = tokenizer("def hello_world():", return_tensors="pt", return_token_type_ids=True)
        input_ids = tokenized.input_ids.to(torch_device)
        output_ids = model.generate(input_ids, do_sample=True)
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        token_type_ids = tokenized.token_type_ids.to(torch_device)
        output_seq = model.generate(input_ids=input_ids, do_sample=True, num_return_sequences=5)
        output_seq_tt = model.generate(
            input_ids=input_ids, token_type_ids=token_type_ids, do_sample=True, num_return_sequences=5
        )
        output_seq_strs = tokenizer.batch_decode(output_seq, skip_special_tokens=True)
        output_seq_tt_strs = tokenizer.batch_decode(output_seq_tt, skip_special_tokens=True)

        if torch_device == "cuda":
            EXPECTED_OUTPUT_STR = 'def hello_world():\n    print("Hello World")\n    return True\n\nresult ='
        else:
            EXPECTED_OUTPUT_STR = "def hello_world():\r\n    print('Hello, World.')\r\n\r\n\r"

        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
        self.assertTrue(
            all(output_seq_strs[idx] != output_seq_tt_strs[idx] for idx in range(len(output_seq_tt_strs)))
        )  # token_type_ids should change output

324	transformers/tests/models/codegen/test_tokenization_codegen.py	Normal file

@@ -0,0 +1,324 @@
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import json
import os
import re
import unittest

from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    from_pretrained_id = "Salesforce/codegen-350M-mono"
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        cls.special_tokens_map = {"unk_token": "<unk>"}

        cls.vocab_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        cls.merges_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(cls.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(cls.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    @classmethod
    def get_tokenizer(cls, pretrained_name=None, **kwargs):
        kwargs.update(cls.special_tokens_map)
        pretrained_name = pretrained_name or cls.tmpdirname
        return CodeGenTokenizer.from_pretrained(pretrained_name, **kwargs)

    @classmethod
    def get_rust_tokenizer(cls, pretrained_name=None, **kwargs):
        kwargs.update(cls.special_tokens_map)
        pretrained_name = pretrained_name or cls.tmpdirname
        return CodeGenTokenizerFast.from_pretrained(pretrained_name, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
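        # These ids follow the toy vocab above: "\u0120low" -> 14, "er" -> 15,
        # "\u0120" -> 10, "n" -> 9, "e" -> 3, "w" -> 2, and "<unk>" -> 19.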

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            self.skipTest(reason="test_rust_tokenizer is set to False")

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @unittest.skip
    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level encoding
        # and get both CodeGen and Roberta to work at the same time (mostly an issue
        # of adding a space before the string).
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.get_rust_tokenizer(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncation=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncation=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])

    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertTrue(decode_s.startswith(bos_token))
        self.assertTrue(all(d.startswith(bos_token) for d in decode_s2))

    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

        text = "\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b:\n    result = a\nelse:\n    result = b"

        input_ids = tokenizer.encode(text)
        truncation_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncation_pattern)
        self.assertEqual(decoded_text, expected_truncated_text)
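        # `truncate_before_pattern` is CodeGen-specific decode() behavior: the
        # decoded string is cut at the earliest match of any of the regexes above
        # (comment markers, docstring quotes, <|endoftext|>, or a run of blank
        # lines), trimming a raw completion down to a single snippet.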
        # TODO @ArthurZ outputs of the fast tokenizer are different in this case, un-related to the PR

    # tokenizer has no padding token
    @unittest.skip(reason="tokenizer has no padding token")
    def test_padding_different_model_input_name(self):
        pass

    @slow
    def test_tokenizer_integration(self):
        # Custom test since this tokenizer takes return_token_type_ids as an init argument for backward compatibility.

        sequences = [
            "Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
            "general-purpose architectures (BERT, GPT-2, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
            "Language Understanding (NLU) and Natural Language Generation (NLG) with over 32+ pretrained "
            "models in 100+ languages and deep interoperability between Jax, PyTorch and TensorFlow.",
            "BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
            "conditioning on both left and right context in all layers.",
            "The quick brown fox jumps over the lazy dog.",
        ]

        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        # Test default case, i.e. return_token_type_ids is False.
        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("Salesforce/codegen-350M-mono")

            encoding = tokenizer(sequences)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # fmt: off
            expected_encoding = {'input_ids': [[41762, 364, 357, 36234, 1900, 355, 12972, 13165, 354, 12, 35636, 364, 290, 12972, 13165, 354, 12, 5310, 13363, 12, 4835, 8, 3769, 2276, 12, 29983, 45619, 357, 13246, 51, 11, 402, 11571, 12, 17, 11, 5564, 13246, 38586, 11, 16276, 44, 11, 4307, 346, 33, 861, 11, 16276, 7934, 23029, 329, 12068, 15417, 28491, 357, 32572, 52, 8, 290, 12068, 15417, 16588, 357, 32572, 38, 8, 351, 625, 3933, 10, 2181, 13363, 4981, 287, 1802, 10, 8950, 290, 2769, 48817, 1799, 1022, 449, 897, 11, 9485, 15884, 354, 290, 309, 22854, 37535, 13], [13246, 51, 318, 3562, 284, 662, 12, 27432, 2769, 8406, 4154, 282, 24612, 422, 9642, 9608, 276, 2420, 416, 26913, 21143, 319, 1111, 1364, 290, 826, 4732, 287, 477, 11685, 13], [464, 2068, 7586, 21831, 18045, 625, 262, 16931, 3290, 13]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
            # fmt: on

            encoding_data = encoding.data
            self.assertDictEqual(encoding_data, expected_encoding)

            for expected, decoded in zip(sequences, decoded_sequences):
                self.assertEqual(expected, decoded)

        # Test return_token_type_ids is True case.
        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("Salesforce/codegen-350M-mono", return_token_type_ids=True)

            encoding = tokenizer(sequences)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # fmt: off
            expected_encoding = {'input_ids': [[41762, 364, 357, 36234, 1900, 355, 12972, 13165, 354, 12, 35636, 364, 290, 12972, 13165, 354, 12, 5310, 13363, 12, 4835, 8, 3769, 2276, 12, 29983, 45619, 357, 13246, 51, 11, 402, 11571, 12, 17, 11, 5564, 13246, 38586, 11, 16276, 44, 11, 4307, 346, 33, 861, 11, 16276, 7934, 23029, 329, 12068, 15417, 28491, 357, 32572, 52, 8, 290, 12068, 15417, 16588, 357, 32572, 38, 8, 351, 625, 3933, 10, 2181, 13363, 4981, 287, 1802, 10, 8950, 290, 2769, 48817, 1799, 1022, 449, 897, 11, 9485, 15884, 354, 290, 309, 22854, 37535, 13], [13246, 51, 318, 3562, 284, 662, 12, 27432, 2769, 8406, 4154, 282, 24612, 422, 9642, 9608, 276, 2420, 416, 26913, 21143, 319, 1111, 1364, 290, 826, 4732, 287, 477, 11685, 13], [464, 2068, 7586, 21831, 18045, 625, 262, 16931, 3290, 13]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
            # fmt: on

            encoding_data = encoding.data
            self.assertDictEqual(encoding_data, expected_encoding)

            for expected, decoded in zip(sequences, decoded_sequences):
                self.assertEqual(expected, decoded)