init
0  transformers/tests/trainer/__init__.py  (new file)
1967  transformers/tests/trainer/test_data_collator.py  (new file; diff suppressed because it is too large)
6390  transformers/tests/trainer/test_trainer.py  (new file; diff suppressed because it is too large)

445  transformers/tests/trainer/test_trainer_callback.py  (new file)
@@ -0,0 +1,445 @@
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import os
import shutil
import tempfile
import unittest
from unittest.mock import patch

from transformers import (
    DefaultFlowCallback,
    EarlyStoppingCallback,
    IntervalStrategy,
    PrinterCallback,
    ProgressCallback,
    Trainer,
    TrainerCallback,
    TrainerState,
    TrainingArguments,
    is_torch_available,
)
from transformers.testing_utils import require_torch
from transformers.trainer_callback import ExportableState


if is_torch_available():
    from transformers.trainer import DEFAULT_CALLBACKS, TRAINER_STATE_NAME

    from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel


class MyTestExportableCallback(TrainerCallback, ExportableState):
    def __init__(self, my_test_state="test"):
        self.my_test_state = my_test_state

    def state(self):
        return {
            "args": {
                "my_test_state": self.my_test_state,
            },
        }


class MyTestTrainerCallback(TrainerCallback):
    "A callback that records the events that go through it."

    def __init__(self, my_test_state="test"):
        self.events = []
        self.my_test_state = my_test_state

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_pre_optimizer_step(self, args, state, control, **kwargs):
        self.events.append("on_pre_optimizer_step")

    def on_optimizer_step(self, args, state, control, **kwargs):
        self.events.append("on_optimizer_step")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")


@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # it's set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_pre_optimizer_step", "on_optimizer_step", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.eval_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0 or step == trainer.state.max_steps:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.eval_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events

    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls whether ProgressCallback or PrinterCallback is used
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since they're not relevant to what's being tested
        with warnings.catch_warnings():
            warnings.simplefilter(action="ignore", category=UserWarning)

            trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
            trainer.train()
            events = trainer.callback_handler.callbacks[-2].events
            self.assertEqual(events, self.get_expected_events(trainer))

            # Independent log/save/eval
            trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
            trainer.train()
            events = trainer.callback_handler.callbacks[-2].events
            self.assertEqual(events, self.get_expected_events(trainer))

            trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
            trainer.train()
            events = trainer.callback_handler.callbacks[-2].events
            self.assertEqual(events, self.get_expected_events(trainer))

            trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, eval_strategy="steps")
            trainer.train()
            events = trainer.callback_handler.callbacks[-2].events
            self.assertEqual(events, self.get_expected_events(trainer))

            trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_strategy="epoch")
            trainer.train()
            events = trainer.callback_handler.callbacks[-2].events
            self.assertEqual(events, self.get_expected_events(trainer))

            # A bit of everything
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback],
                logging_steps=3,
                save_steps=10,
                eval_steps=5,
                eval_strategy="steps",
            )
            trainer.train()
            events = trainer.callback_handler.callbacks[-2].events
            self.assertEqual(events, self.get_expected_events(trainer))

            # warning should be emitted for duplicated callbacks
            with patch("transformers.trainer_callback.logger.warning") as warn_mock:
                trainer = self.get_trainer(
                    callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
                )
                assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]

    def test_stateful_callbacks(self):
        # Use something with non-default values
        cb = EarlyStoppingCallback(early_stopping_patience=5, early_stopping_threshold=0.2)
        trainer = self.get_trainer(
            callbacks=[cb],
            load_best_model_at_end=True,
            save_strategy="steps",
            eval_strategy="steps",
            save_steps=2,
            eval_steps=2,
            max_steps=2,
        )
        trainer.train()

        # Create a new trainer with defaults
        trainer = self.get_trainer(
            callbacks=[EarlyStoppingCallback()],
            load_best_model_at_end=True,
            save_strategy="steps",
            eval_strategy="steps",
            save_steps=2,
            eval_steps=2,
            max_steps=2,
            restore_callback_states_from_checkpoint=True,
        )
        # Load it back in and verify values
        checkpoint = os.path.join(self.output_dir, "checkpoint-2")
        trainer.train(resume_from_checkpoint=checkpoint)
        cb = [
            callback for callback in trainer.callback_handler.callbacks if isinstance(callback, EarlyStoppingCallback)
        ][0]
        assert cb.early_stopping_patience == 5
        assert cb.early_stopping_threshold == 0.2

    def test_stateful_mixed_callbacks(self):
        # Use two callbacks, one stateful and one not
        # Use something with non-default values
        cbs = [
            MyTestTrainerCallback(my_test_state="another value"),
            EarlyStoppingCallback(early_stopping_patience=5, early_stopping_threshold=0.2),
        ]
        trainer = self.get_trainer(
            callbacks=cbs,
            load_best_model_at_end=True,
            save_strategy="steps",
            eval_strategy="steps",
            save_steps=2,
            eval_steps=2,
            max_steps=2,
        )
        trainer.train()

        # Create a new trainer with defaults
        trainer = self.get_trainer(
            callbacks=[EarlyStoppingCallback(), MyTestTrainerCallback()],
            load_best_model_at_end=True,
            save_strategy="steps",
            eval_strategy="steps",
            save_steps=2,
            eval_steps=2,
            max_steps=2,
            restore_callback_states_from_checkpoint=True,
        )
        # Load it back in and verify values
        checkpoint = os.path.join(self.output_dir, "checkpoint-2")
        trainer.train(resume_from_checkpoint=checkpoint)
        cbs = [
            callback
            for callback in trainer.callback_handler.callbacks
            if isinstance(callback, (EarlyStoppingCallback, MyTestTrainerCallback))
        ]
        assert len(cbs) == 2
        my_test, early_stopping = cbs
        assert early_stopping.early_stopping_patience == 5
        assert early_stopping.early_stopping_threshold == 0.2
        assert my_test.my_test_state == "test"

    def test_stateful_duplicate_callbacks(self):
        # Use something with non-default values
        cbs = [MyTestExportableCallback("first"), MyTestExportableCallback("second")]
        trainer = self.get_trainer(
            callbacks=cbs,
            load_best_model_at_end=True,
            save_strategy="steps",
            eval_strategy="steps",
            save_steps=2,
            eval_steps=2,
            max_steps=2,
        )
        trainer.train()

        # Create a new trainer with defaults
        trainer = self.get_trainer(
            callbacks=[MyTestExportableCallback(), MyTestExportableCallback()],
            load_best_model_at_end=True,
            save_strategy="steps",
            eval_strategy="steps",
            save_steps=2,
            eval_steps=2,
            max_steps=2,
            restore_callback_states_from_checkpoint=True,
        )
        # Load it back in and verify values
        checkpoint = os.path.join(self.output_dir, "checkpoint-2")
        trainer.train(resume_from_checkpoint=checkpoint)
        cbs = [
            callback
            for callback in trainer.callback_handler.callbacks
            if isinstance(callback, MyTestExportableCallback)
        ]
        assert len(cbs) == 2
        assert cbs[0].my_test_state == "first"
        assert cbs[1].my_test_state == "second"

    def test_missing_stateful_callback(self):
        cb = EarlyStoppingCallback()
        trainer = self.get_trainer(
            callbacks=[cb],
            load_best_model_at_end=True,
            save_strategy="steps",
            eval_strategy="steps",
            save_steps=2,
            eval_steps=2,
            max_steps=2,
        )
        trainer.train()

        # Create a new trainer with defaults
        trainer = self.get_trainer(
            save_strategy="steps",
            eval_strategy="steps",
            save_steps=2,
            eval_steps=2,
            max_steps=2,
            restore_callback_states_from_checkpoint=True,
        )
        # Load it back in and verify values
        checkpoint = os.path.join(self.output_dir, "checkpoint-2")
        # warning should be emitted for not-present callbacks
        with patch("transformers.trainer.logger.warning") as warn_mock:
            trainer.train(resume_from_checkpoint=checkpoint)
            assert "EarlyStoppingCallback" in warn_mock.call_args[0][0]

    def test_stateful_control(self):
        trainer = self.get_trainer(
            max_steps=2,
            save_strategy="steps",
            save_steps=2,
        )
        trainer.train()
        # Load it back in and verify values
        trainer = self.get_trainer(max_steps=2, restore_callback_states_from_checkpoint=True)
        checkpoint = os.path.join(self.output_dir, "checkpoint-2")
        trainer.state = TrainerState.load_from_json(os.path.join(checkpoint, TRAINER_STATE_NAME))
        trainer._load_callback_state()
        assert trainer.control.should_training_stop

    def test_no_duplicate_save_on_epoch_save_strategy(self):
        times_saved = 0

        class OnEndCallback(TrainerCallback):
            def on_step_end(self, args: TrainingArguments, state: TrainerState, control, **kwargs):
                nonlocal times_saved
                if control.should_save:
                    times_saved += 1

            def on_epoch_end(self, args: TrainingArguments, state: TrainerState, control, **kwargs):
                nonlocal times_saved
                if control.should_save:
                    times_saved += 1

        trainer = self.get_trainer(max_steps=2, save_strategy="epoch", callbacks=[OnEndCallback])
        trainer.train()
        assert times_saved == 1
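
The file above exercises every TrainerCallback hook and the TrainerControl flags the Trainer reads after each event. As a minimal sketch of that API (the class name and step count are illustrative, not part of this commit), a callback can end training early by flipping a control flag:

from transformers import TrainerCallback


class StopAfterNStepsCallback(TrainerCallback):
    # Illustrative callback, not from this commit.
    def __init__(self, stop_after: int = 10):
        self.stop_after = stop_after

    def on_step_end(self, args, state, control, **kwargs):
        # state.global_step counts completed optimizer steps; the Trainer loop
        # acts on the control flags after each event, which is exactly what the
        # tests above assert.
        if state.global_step >= self.stop_after:
            control.should_training_stop = True
        return control


# Usage with a Trainer instance: trainer.add_callback(StopAfterNStepsCallback(5))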
207  transformers/tests/trainer/test_trainer_distributed.py  (new file)
@@ -0,0 +1,207 @@
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import numpy as np

from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
    TestCasePlus,
    backend_device_count,
    execute_subprocess_async,
    get_torch_dist_unique_port,
    require_torch_multi_accelerator,
    run_first,
    torch_device,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging


logger = logging.get_logger(__name__)


if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data import Dataset, IterableDataset

    from transformers import Trainer

    class DummyDataset(Dataset):
        def __init__(self, length: int = 101):
            self.length = length

        def __len__(self):
            return self.length

        def __getitem__(self, i) -> int:
            return i

    class DummyDataCollator:
        def __call__(self, features):
            return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}

    class DummyModel(nn.Module):
        def __init__(self):
            super().__init__()
            # Add some (unused) params, otherwise DDP will complain.
            self.fc = nn.Linear(120, 80)

        def forward(self, input_ids, labels=None):
            if labels is not None:
                return torch.tensor(0.0, device=input_ids.device), input_ids
            else:
                return input_ids

    class RegressionModel(nn.Module):
        def __init__(self, a=0, b=0, double_output=False):
            super().__init__()
            self.a = nn.Parameter(torch.tensor(a).float())
            self.b = nn.Parameter(torch.tensor(b).float())
            self.double_output = double_output
            self.config = None

        def forward(self, input_x, labels=None, **kwargs):
            y = input_x * self.a + self.b
            if labels is None:
                return (y, y) if self.double_output else (y,)
            loss = nn.functional.mse_loss(y, labels)
            return (loss, y, y) if self.double_output else (loss, y)

    class SampleIterableDataset(IterableDataset):
        def __init__(self, a=2, b=3, length=64, seed=42, label_names=None):
            self.dataset = RegressionDataset(a=a, b=b, length=length, seed=seed, label_names=label_names)

        def __iter__(self):
            for i in range(len(self.dataset)):
                yield self.dataset[i]

    class FiniteIterableDataset(SampleIterableDataset):
        def __init__(self, a=2, b=3, length=64, seed=42, label_names=None):
            super().__init__(a, b, length, seed, label_names)
            self.current_sample = 0

        def __iter__(self):
            while self.current_sample < len(self.dataset):
                yield self.dataset[self.current_sample]
                self.current_sample += 1

    class RegressionDataset:
        def __init__(self, a=2, b=3, length=64, seed=42, label_names=None):
            np.random.seed(seed)
            self.label_names = ["labels"] if label_names is None else label_names
            self.length = length
            self.x = np.random.normal(size=(length,)).astype(np.float32)
            self.ys = [a * self.x + b + np.random.normal(scale=0.1, size=(length,)) for _ in self.label_names]
            self.ys = [y.astype(np.float32) for y in self.ys]

        def __len__(self):
            return self.length

        def __getitem__(self, i):
            result = {name: y[i] for name, y in zip(self.label_names, self.ys)}
            result["input_x"] = self.x[i]
            return result


class TestTrainerDistributed(TestCasePlus):
    @run_first
    @require_torch_multi_accelerator
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={backend_device_count(torch_device)}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir} --report_to none".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


if __name__ == "__main__":
    # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
    #
    # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py

    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (This is crucial for prediction, for instance.)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None

    # Check that `dispatch_batches=False` will work on a finite iterable dataset
    train_dataset = FiniteIterableDataset(label_names=["labels", "extra"], length=1)

    model = RegressionModel()
    training_args.per_device_train_batch_size = 1
    training_args.max_steps = 1
    training_args.accelerator_config.dispatch_batches = False

    trainer = Trainer(model, training_args, train_dataset=train_dataset)
    trainer.train()
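
For context, `execute_subprocess_async` above essentially wraps a `torchrun` launch. A minimal sketch of the equivalent manual launch (the process count and paths are assumptions for illustration):

import subprocess

# Assumed 2-accelerator machine; torchrun re-enters this file's __main__ block
# once per process, which is how the sample-ordering checks above get exercised.
cmd = [
    "torchrun",
    "--nproc_per_node=2",
    "test_trainer_distributed.py",
    "--output_dir", "/tmp/trainer_distributed",
    "--report_to", "none",
]
subprocess.run(cmd, check=True)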
107  transformers/tests/trainer/test_trainer_distributed_loss.py  (new file)
@@ -0,0 +1,107 @@
import json

import datasets

from tests.trainer.test_trainer import StoreLossCallback
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.testing_utils import (
    TestCasePlus,
    backend_device_count,
    execute_subprocess_async,
    get_torch_dist_unique_port,
    require_torch_multi_accelerator,
    run_first,
    torch_device,
)


class TestTrainerDistributedLoss(TestCasePlus):
    @run_first
    @require_torch_multi_accelerator
    def test_trainer(self):
        device_count = backend_device_count(torch_device)
        min_bs = 2
        output_dir = self.get_auto_remove_tmp_dir()
        for gpu_num, enable, bs, name in (
            (1, True, min_bs * device_count, "base"),
            (device_count, False, min_bs, "broken"),
            (device_count, True, min_bs, "fixed"),
        ):
            distributed_args = f"""--nproc_per_node={gpu_num}
                --master_port={get_torch_dist_unique_port()}
                {self.test_file_dir}/test_trainer_distributed_loss.py
            """.split()
            args = f"--output_dir {output_dir}/{name} --per_device_train_batch_size {bs} --average_tokens_across_devices {enable}".split()
            cmd = ["torchrun"] + distributed_args + args
            execute_subprocess_async(cmd, env=self.get_env())
        with open(f"{output_dir}/base_losses.json") as f:
            base_loss = json.load(f)
        with open(f"{output_dir}/broken_losses.json") as f:
            broken_loss = json.load(f)
        with open(f"{output_dir}/fixed_losses.json") as f:
            fixed_loss = json.load(f)

        broken_diff = [abs(base_loss[i] - broken_loss[i]) for i in range(len(base_loss))]
        fixed_diff = [abs(base_loss[i] - fixed_loss[i]) for i in range(len(base_loss))]
        sum_base = sum(base_loss)
        sum_broken = sum(broken_loss)
        relative_broken = abs(sum_base - sum_broken) / max(sum_base, sum_broken)

        # the gap may be smaller for other models, but it's still ok
        self.assertGreater(max(broken_diff), 0.5)
        self.assertLess(max(fixed_diff), 0.005)
        self.assertLess(relative_broken, 0.1)


def run_distributed_training(training_args):
    set_seed(42)
    model_name = "nickypro/tinyllama-15M"
    dataset_name = "wikitext"
    dataset_config = "wikitext-2-raw-v1"
    dataset = datasets.load_dataset(dataset_name, dataset_config, split="train[:100]")
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    tokenizer.pad_token = tokenizer.eos_token

    def tokenize_function(examples):
        return tokenizer(examples["text"], max_length=16, padding="max_length", truncation=True)

    tokenized_dataset = dataset.map(tokenize_function, batched=True)

    tokenizer.pad_token = tokenizer.eos_token
    data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)

    model = AutoModelForCausalLM.from_pretrained(model_name)

    loss_callback = StoreLossCallback()

    training_args.logging_steps = 1
    training_args.max_steps = 10
    training_args.learning_rate = 3e-4
    training_args.disable_tqdm = True
    training_args.dataloader_drop_last = True
    training_args.report_to = []

    trainer = Trainer(
        model,
        training_args,
        train_dataset=tokenized_dataset,
        callbacks=[loss_callback],
        data_collator=data_collator,
    )
    trainer.train()
    with open(training_args.output_dir + "_losses.json", "w") as f:
        json.dump(loss_callback.losses, f)


if __name__ == "__main__":
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
    run_distributed_training(training_args)
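
The base/broken/fixed runs above compare loss curves with and without `--average_tokens_across_devices`. A small worked example (made-up numbers) of why naive per-device averaging drifts when ranks see different numbers of non-pad tokens:

# Two ranks, different token counts per batch (illustrative values only).
loss_sums = [2.0, 4.0]   # summed token losses on rank 0 and rank 1
token_counts = [10, 30]  # non-pad tokens on rank 0 and rank 1

# "Broken": each rank normalizes by its own tokens, then the means are averaged.
per_device = sum(s / t for s, t in zip(loss_sums, token_counts)) / 2   # ~0.1667

# "Fixed": normalize by the global token count across devices.
global_mean = sum(loss_sums) / sum(token_counts)                       # 0.15

assert abs(per_device - global_mean) > 0.01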
93  transformers/tests/trainer/test_trainer_distributed_worker_seed.py  (new file)

@@ -0,0 +1,93 @@
import random

import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.utils.data import Dataset

from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.testing_utils import (
    TestCasePlus,
    backend_device_count,
    execute_subprocess_async,
    get_torch_dist_unique_port,
    require_torch_multi_accelerator,
    run_first,
    torch_device,
)


def gather_from_all_gpus(tensor, world_size):
    # Prepare a list to gather tensors from all processes
    gather_list = [torch.zeros_like(tensor) for _ in range(world_size)]
    dist.all_gather(gather_list, tensor)
    return gather_list  # List of tensors from all ranks


class DummyDataset(Dataset):
    def __init__(self):
        self.length = 64

    def __len__(self):
        return self.length

    def __getitem__(self, i) -> dict:
        x = random.random()
        y = np.random.random()
        z = torch.rand([]).item()
        return {"x": torch.tensor([x, y, z])}


class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(3, 1)

    def forward(self, x):
        local_tensor = torch.tensor(x, device=torch_device)
        gathered = gather_from_all_gpus(local_tensor, dist.get_world_size())
        assert not all(torch.allclose(t, gathered[0]) for t in gathered[1:])
        y = self.fc(x)
        return (y.mean(), y)


class TestTrainerDistributedWorkerSeed(TestCasePlus):
    @run_first
    @require_torch_multi_accelerator
    def test_trainer(self):
        device_count = backend_device_count(torch_device)
        output_dir = self.get_auto_remove_tmp_dir()
        distributed_args = f"""--nproc_per_node={device_count}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed_worker_seed.py
        """.split()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())


def run_distributed_training(training_args):
    set_seed(42)
    model = DummyModel()
    dataset = DummyDataset()
    training_args.max_steps = 10
    # dataloader_num_workers must be > 0 to enable worker_init_fn
    training_args.dataloader_num_workers = 2
    trainer = Trainer(
        model,
        training_args,
        train_dataset=dataset,
    )
    trainer.train()


if __name__ == "__main__":
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
    run_distributed_training(training_args)
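
The assertion in `DummyModel.forward` above fails if every rank's dataloader workers draw identical random numbers. A sketch of the standard per-worker seeding recipe from the PyTorch docs (shown for illustration; not taken from this commit):

import random

import numpy as np
import torch


def worker_init_fn(worker_id: int) -> None:
    # torch.initial_seed() is already distinct per DataLoader worker (and per
    # rank when the base seed mixes in the process rank), so reuse it to seed
    # the other RNGs that __getitem__ above draws from.
    worker_seed = torch.initial_seed() % 2**32
    np.random.seed(worker_seed)
    random.seed(worker_seed)


# Usage: DataLoader(dataset, num_workers=2, worker_init_fn=worker_init_fn)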
203  transformers/tests/trainer/test_trainer_fsdp.py  (new file)
@@ -0,0 +1,203 @@
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from transformers import is_torch_available
from transformers.testing_utils import (
    TestCasePlus,
    backend_device_count,
    execute_subprocess_async,
    get_torch_dist_unique_port,
    require_accelerate,
    require_fp8,
    require_torch_multi_accelerator,
    run_first,
    torch_device,
)


if is_torch_available():
    import torch
    import torch.distributed
    import torch.utils.data

    from transformers import (
        AutoModelForCausalLM,
        AutoTokenizer,
        DataCollatorForSeq2Seq,
        EvalPrediction,
        GenerationConfig,
        HfArgumentParser,
        PreTrainedTokenizerBase,
        Seq2SeqTrainer,
        Seq2SeqTrainingArguments,
    )

    class DummyTextDataset(torch.utils.data.Dataset):
        def __init__(self, tokenizer: PreTrainedTokenizerBase) -> None:
            data = 4 * [
                "Hello world!",
                "The quick brown fox jumps over the lazy dog.",
            ]
            self.data = [
                {k: v.squeeze(0) for k, v in tokenizer(item, return_tensors="pt", return_attention_mask=True).items()}
                for item in data
            ]
            for item in self.data:
                item["labels"] = item["input_ids"]

        def __len__(self) -> int:
            return len(self.data)

        def __getitem__(self, i: int) -> dict:
            return self.data[i]


class TestFSDPTrainer(TestCasePlus):
    @require_torch_multi_accelerator
    @require_accelerate
    @run_first
    def test_trainer(self):
        output_dir = self.get_auto_remove_tmp_dir()
        cmd = [
            "accelerate",
            "launch",
            "--use_fsdp",
            "--main_process_port",
            f"{get_torch_dist_unique_port()}",
            "--num_processes",
            f"{backend_device_count(torch_device)}",
            "--fsdp_transformer_layer_cls_to_wrap",
            "GPT2Block",
            f"{self.test_file_dir}/test_trainer_fsdp.py",
            "--output_dir",
            f"{output_dir}",
            "--report_to",
            "none",
        ]
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestFSDPTrainerFP8(TestCasePlus):
    @require_torch_multi_accelerator
    @require_accelerate
    @require_fp8
    @run_first
    def test_trainer(self):
        output_dir = self.get_auto_remove_tmp_dir()
        cmd = [
            "accelerate",
            "launch",
            "--use_fsdp",
            "--main_process_port",
            f"{get_torch_dist_unique_port()}",
            "--num_processes",
            f"{backend_device_count(torch_device)}",
            "--mixed_precision",
            "fp8",
            "--fsdp_transformer_layer_cls_to_wrap",
            "GPT2Block",
            f"{self.test_file_dir}/test_trainer_fsdp.py",
            "--output_dir",
            f"{output_dir}",
            "--report_to",
            "none",
        ]
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestFSDPTrainerWrap(TestCasePlus):
    @require_torch_multi_accelerator
    @require_accelerate
    @run_first
    def test_trainer(self):
        output_dir = self.get_auto_remove_tmp_dir()
        cmd = [
            "accelerate",
            "launch",
            "--use_fsdp",
            "--main_process_port",
            f"{get_torch_dist_unique_port()}",
            "--num_processes",
            f"{backend_device_count(torch_device)}",
            "--fsdp_transformer_layer_cls_to_wrap",
            "GPT2Block",
            f"{self.test_file_dir}/test_trainer_fsdp.py",
            "--output_dir",
            f"{output_dir}",
            "--report_to",
            "none",
            "--auto_find_batch_size",
            "True",
        ]
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestFSDPTrainerTorchCompile(TestCasePlus):
    @require_torch_multi_accelerator
    @require_accelerate
    @run_first
    def test_trainer(self):
        output_dir = self.get_auto_remove_tmp_dir()
        cmd = [
            "accelerate",
            "launch",
            "--use_fsdp",
            "--main_process_port",
            f"{get_torch_dist_unique_port()}",
            "--num_processes",
            f"{backend_device_count(torch_device)}",
            "--fsdp_transformer_layer_cls_to_wrap",
            "GPT2Block",
            f"{self.test_file_dir}/test_trainer_fsdp.py",
            "--torch_compile_mode",
            "default",
            "--output_dir",
            f"{output_dir}",
            "--report_to",
            "none",
        ]
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


if __name__ == "__main__":
    parser = HfArgumentParser((Seq2SeqTrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
    training_args.per_device_eval_batch_size = 1
    training_args.use_legacy_prediction_loop = False
    training_args.predict_with_generate = True
    training_args.generation_config = GenerationConfig(max_length=30)

    pretrained_model_name = "hf-internal-testing/tiny-random-gpt2"
    tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name)
    tokenizer.pad_token = tokenizer.eos_token
    device = torch.device(torch.distributed.get_rank())
    model = AutoModelForCausalLM.from_pretrained(pretrained_model_name).to(device)

    def compute_metrics(p: EvalPrediction) -> dict[str, bool]:
        return {"accuracy": (p.predictions == p.label_ids).mean()}

    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_collator=DataCollatorForSeq2Seq(tokenizer, model),
        eval_dataset=DummyTextDataset(tokenizer),
        compute_metrics=compute_metrics,
    )

    metrics = trainer.evaluate()
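
The FSDP tests drive sharding through `accelerate launch --use_fsdp ...`. A roughly equivalent sketch configures it on `TrainingArguments` directly; treat the exact `fsdp_config` keys as an assumption to verify against your transformers version:

from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="/tmp/fsdp_out",   # illustrative path
    fsdp="full_shard auto_wrap",  # shard params, grads, and optimizer state
    fsdp_config={"transformer_layer_cls_to_wrap": ["GPT2Block"]},
    report_to="none",
)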
205  transformers/tests/trainer/test_trainer_seq2seq.py  (new file)
@@ -0,0 +1,205 @@
# Copyright 2020 the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from transformers import (
    AutoModelForSeq2SeqLM,
    BertTokenizer,
    DataCollatorForSeq2Seq,
    EncoderDecoderModel,
    GenerationConfig,
    Seq2SeqTrainer,
    Seq2SeqTrainingArguments,
    T5Tokenizer,
)
from transformers.testing_utils import TestCasePlus, require_sentencepiece, require_torch, slow
from transformers.utils import is_datasets_available


if is_datasets_available():
    import datasets


@require_sentencepiece
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("abisee/cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("abisee/cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            eval_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
            report_to="none",
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            processing_class=tokenizer,
        )

        # start training
        trainer.train()

    @slow
    @require_torch
    def test_return_sequences(self):
        # Tests that the number of generated sequences is correct when num_return_sequences > 1,
        # essentially ensuring that `accelerator.gather()` is used instead of `gather_for_metrics`
        INPUT_COLUMN = "question"
        TARGET_COLUMN = "answer"
        MAX_INPUT_LENGTH = 256
        MAX_TARGET_LENGTH = 256

        dataset = datasets.load_dataset("openai/gsm8k", "main", split="train[:38]")
        model = AutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-small")
        tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-small")
        data_collator = DataCollatorForSeq2Seq(tokenizer, model=model, return_tensors="pt", padding="longest")
        gen_config = GenerationConfig.from_pretrained(
            "google-t5/t5-small", max_length=None, min_length=None, max_new_tokens=256, min_new_tokens=1, num_beams=5
        )

        training_args = Seq2SeqTrainingArguments(".", predict_with_generate=True, report_to="none")

        trainer = Seq2SeqTrainer(
            model=model,
            args=training_args,
            processing_class=tokenizer,
            data_collator=data_collator,
            compute_metrics=lambda x: {"samples": x[0].shape[0]},
        )

        def prepare_data(examples):
            # Remove pairs where at least one record is None
            inputs = examples[INPUT_COLUMN]
            targets = examples[TARGET_COLUMN]

            model_inputs = tokenizer(inputs, max_length=MAX_INPUT_LENGTH, truncation=True)
            labels = tokenizer(text_target=targets, max_length=MAX_TARGET_LENGTH, truncation=True)
            model_inputs["labels"] = labels["input_ids"]

            return model_inputs

        prepared_dataset = dataset.map(prepare_data, batched=True, remove_columns=[INPUT_COLUMN, TARGET_COLUMN])
        dataset_len = len(prepared_dataset)  # 38

        for num_return_sequences in range(3, 0, -1):
            gen_config.num_return_sequences = num_return_sequences
            metrics = trainer.evaluate(eval_dataset=prepared_dataset, generation_config=gen_config)
            assert metrics["eval_samples"] == dataset_len * num_return_sequences, (
                f"Got {metrics['eval_samples']}, expected: {dataset_len * num_return_sequences}"
            )

    @require_torch
    def test_bad_generation_config_fail_early(self):
        # Tests that a bad generation config causes the trainer to fail early
        model = AutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-small")
        tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-small")
        data_collator = DataCollatorForSeq2Seq(tokenizer, model=model, return_tensors="pt", padding="longest")
        gen_config = GenerationConfig(do_sample=False, top_p=0.9)  # bad: top_p is not compatible with do_sample=False

        training_args = Seq2SeqTrainingArguments(
            ".", predict_with_generate=True, generation_config=gen_config, report_to="none"
        )
        with self.assertRaises(ValueError) as exc:
            _ = Seq2SeqTrainer(
                model=model,
                args=training_args,
                processing_class=tokenizer,
                data_collator=data_collator,
                compute_metrics=lambda x: {"samples": x[0].shape[0]},
            )
        self.assertIn("Fix these issues to train your model", str(exc.exception))
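
One step in `_map_to_encoder_decoder_inputs` above deserves a worked example: pad positions in the labels are replaced with -100, the target index that PyTorch's cross-entropy loss ignores. Toy ids below; the pad id of 0 is an assumption for the example (it happens to match BERT's):

pad_token_id = 0  # assumed pad id for this toy example
labels = [[5, 7, 0, 0], [9, 0, 0, 0]]
masked = [[-100 if token == pad_token_id else token for token in seq] for seq in labels]
assert masked == [[5, 7, -100, -100], [9, -100, -100, -100]]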
130  transformers/tests/trainer/test_trainer_tpu.py  (new file)
@@ -0,0 +1,130 @@
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This test is meant to be run on an instance with TPUs like this:
#
#   python examples/pytorch/xla_spawn.py --num_cores=8 tests/test_trainer_tpu.py
#
# Replace 8 with the number of TPU cores you have.
#

import sys

from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.utils import logging


logger = logging.get_logger(__name__)


if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data import Dataset

    from transformers import Trainer

    class DummyDataset(Dataset):
        def __init__(self, length: int = 101):
            self.length = length

        def __len__(self):
            return self.length

        def __getitem__(self, i) -> int:
            return i

    class DummyDataCollator:
        def __call__(self, features):
            return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}

    class DummyModel(nn.Module):
        def __init__(self):
            super().__init__()
            # Add some (unused) params, otherwise DDP will complain.
            self.fc = nn.Linear(120, 80)

        def forward(self, input_ids, labels=None):
            if labels is not None:
                return torch.tensor(0.0, device=input_ids.device), input_ids
            else:
                return input_ids


def main():
    parser = HfArgumentParser((TrainingArguments,))
    sys.argv += ["--output_dir", "./examples"]
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, "
        f"tpu_num_cores: {training_args.tpu_num_cores}",
    )

    # Essentially, what we want to verify in the distributed case is
    # that we get all samples back, in the right order.
    # (This is crucial for prediction, for instance.)
    for dataset_length in [1001, 256, 15]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None

    logger.info("🔥 All distributed tests successful")


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
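
For reference, the `examples/pytorch/xla_spawn.py` launcher mentioned in the header fans `_mp_fn` out across TPU cores. A minimal sketch of what it does; this assumes `torch_xla` is installed, and the `nprocs` argument varies across torch_xla releases:

import torch_xla.distributed.xla_multiprocessing as xmp

import test_trainer_tpu

# One process per TPU core; each process runs main() via _mp_fn(index).
xmp.spawn(test_trainer_tpu._mp_fn, args=(), nprocs=8)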
655  transformers/tests/trainer/test_trainer_utils.py  (new file)
@@ -0,0 +1,655 @@
|
||||
# Copyright 2018 the HuggingFace Inc. team.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import copy
|
||||
import unittest
|
||||
import warnings
|
||||
|
||||
import numpy as np
|
||||
|
||||
from transformers import Trainer, TrainingArguments
|
||||
from transformers.data.data_collator import default_data_collator
|
||||
from transformers.testing_utils import require_accelerate, require_torch
|
||||
from transformers.trainer_utils import RemoveColumnsCollator, find_executable_batch_size
|
||||
from transformers.utils import is_torch_available
|
||||
|
||||
|
||||
if is_torch_available():
|
||||
import torch
|
||||
from torch import nn
|
||||
from torch.utils.data import IterableDataset
|
||||
|
||||
from transformers.modeling_outputs import SequenceClassifierOutput
|
||||
from transformers.tokenization_utils_base import BatchEncoding
|
||||
from transformers.trainer_pt_utils import (
|
||||
DistributedLengthGroupedSampler,
|
||||
DistributedSamplerWithLoop,
|
||||
DistributedTensorGatherer,
|
||||
EvalLoopContainer,
|
||||
IterableDatasetShard,
|
||||
LabelSmoother,
|
||||
LengthGroupedSampler,
|
||||
SequentialDistributedSampler,
|
||||
ShardSampler,
|
||||
get_parameter_names,
|
||||
numpy_pad_and_concatenate,
|
||||
torch_pad_and_concatenate,
|
||||
)
|
||||
|
||||
class TstLayer(nn.Module):
|
||||
def __init__(self, hidden_size):
|
||||
super().__init__()
|
||||
self.linear1 = nn.Linear(hidden_size, hidden_size)
|
||||
self.ln1 = nn.LayerNorm(hidden_size)
|
||||
self.linear2 = nn.Linear(hidden_size, hidden_size)
|
||||
self.ln2 = nn.LayerNorm(hidden_size)
|
||||
self.bias = nn.Parameter(torch.zeros(hidden_size))
|
||||
|
||||
def forward(self, x):
|
||||
h = self.ln1(nn.functional.relu(self.linear1(x)))
|
||||
h = nn.functional.relu(self.linear2(x))
|
||||
return self.ln2(x + h + self.bias)
|
||||
|
||||
class RandomIterableDataset(IterableDataset):
|
||||
# For testing, an iterable dataset of random length
|
||||
def __init__(self, p_stop=0.01, max_length=1000):
|
||||
self.p_stop = p_stop
|
||||
self.max_length = max_length
|
||||
self.generator = torch.Generator()
|
||||
|
||||
def __iter__(self):
|
||||
count = 0
|
||||
stop = False
|
||||
while not stop and count < self.max_length:
|
||||
yield count
|
||||
count += 1
|
||||
number = torch.rand(1, generator=self.generator).item()
|
||||
stop = number < self.p_stop
|
||||
|
||||
|
||||
@require_torch
|
||||
class TrainerUtilsTest(unittest.TestCase):
|
||||
    def test_distributed_tensor_gatherer(self):
        # Simulate a result with a dataset of size 21, 4 processes and chunks of lengths 2, 3, 1
        world_size = 4
        num_samples = 21
        input_indices = [
            [0, 1, 6, 7, 12, 13, 18, 19],
            [2, 3, 4, 8, 9, 10, 14, 15, 16, 20, 0, 1],
            [5, 11, 17, 2],
        ]

        predictions = np.random.normal(size=(num_samples, 13))
        gatherer = DistributedTensorGatherer(world_size=world_size, num_samples=num_samples)
        for indices in input_indices:
            gatherer.add_arrays(predictions[indices])
        result = gatherer.finalize()
        self.assertTrue(np.array_equal(result, predictions))

        # With nested tensors
        gatherer = DistributedTensorGatherer(world_size=world_size, num_samples=num_samples)
        for indices in input_indices:
            gatherer.add_arrays([predictions[indices], [predictions[indices], predictions[indices]]])
        result = gatherer.finalize()
        self.assertTrue(isinstance(result, list))
        self.assertEqual(len(result), 2)
        self.assertTrue(isinstance(result[1], list))
        self.assertEqual(len(result[1]), 2)
        self.assertTrue(np.array_equal(result[0], predictions))
        self.assertTrue(np.array_equal(result[1][0], predictions))
        self.assertTrue(np.array_equal(result[1][1], predictions))

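    # Explanatory note on the simulation above: each add_arrays() call stands for one
    # evaluation step in which every one of the 4 processes contributes a same-sized
    # chunk, so the index lists interleave samples 0, 6, 12, 18, ... The distributed
    # sampler pads the dataset to a multiple of world_size, which is why indices 0, 1
    # and 2 reappear at the end; finalize() truncates back to num_samples and restores
    # the original dataset order.
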
    def test_distributed_tensor_gatherer_different_shapes(self):
        # Simulate a result with a dataset of size 21, 4 processes and chunks of lengths 2, 3, 1
        world_size = 4
        num_samples = 21
        input_indices = [
            [0, 1, 6, 7, 12, 13, 18, 19],
            [2, 3, 4, 8, 9, 10, 14, 15, 16, 20, 0, 1],
            [5, 11, 17, 2],
        ]
        sequence_lengths = [8, 10, 13]

        predictions = np.random.normal(size=(num_samples, 13))
        gatherer = DistributedTensorGatherer(world_size=world_size, num_samples=num_samples)
        for indices, seq_length in zip(input_indices, sequence_lengths):
            gatherer.add_arrays(predictions[indices, :seq_length])
        result = gatherer.finalize()

        # Remove the extra samples added at the end for a round multiple of num processes.
        actual_indices = [input_indices[0], input_indices[1][:-2], input_indices[2][:-1]]
        for indices, seq_length in zip(actual_indices, sequence_lengths):
            self.assertTrue(np.array_equal(result[indices, :seq_length], predictions[indices, :seq_length]))

        # With nested tensors
        predictions = np.random.normal(size=(num_samples, 13))
        gatherer = DistributedTensorGatherer(world_size=world_size, num_samples=num_samples)
        for indices, seq_length in zip(input_indices, sequence_lengths):
            gatherer.add_arrays([predictions[indices, :seq_length], predictions[indices]])
        result = gatherer.finalize()

        for indices, seq_length in zip(actual_indices, sequence_lengths):
            self.assertTrue(np.array_equal(result[0][indices, :seq_length], predictions[indices, :seq_length]))
        self.assertTrue(np.array_equal(result[1], predictions))

        # Check it also works when the varying seq_length is in the second element
        gatherer = DistributedTensorGatherer(world_size=world_size, num_samples=num_samples)
        for indices, seq_length in zip(input_indices, sequence_lengths):
            gatherer.add_arrays([predictions[indices], predictions[indices, :seq_length]])
        result = gatherer.finalize()

        self.assertTrue(np.array_equal(result[0], predictions))
        for indices, seq_length in zip(actual_indices, sequence_lengths):
            self.assertTrue(np.array_equal(result[1][indices, :seq_length], predictions[indices, :seq_length]))

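    # Explanatory note: when chunks disagree on a trailing dimension (8 vs 10 vs 13
    # here), the gatherer pads the narrower arrays up to the widest one before
    # concatenating, using its padding index (-100 by default), which is why only the
    # [:seq_length] slices are compared against the reference predictions above.
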
    def test_label_smoothing(self):
        epsilon = 0.1
        num_labels = 12
        random_logits = torch.randn(4, 5, num_labels)
        random_labels = torch.randint(0, num_labels, (4, 5))
        loss = nn.functional.cross_entropy(random_logits.view(-1, num_labels), random_labels.view(-1))
        model_output = SequenceClassifierOutput(logits=random_logits)
        label_smoothed_loss = LabelSmoother(epsilon)(model_output, random_labels)
        log_probs = -nn.functional.log_softmax(random_logits, dim=-1)
        expected_loss = (1 - epsilon) * loss + epsilon * log_probs.mean()
        torch.testing.assert_close(label_smoothed_loss, expected_loss)

        # With a few -100 labels
        random_labels[0, 1] = -100
        random_labels[2, 1] = -100
        random_labels[2, 3] = -100

        loss = nn.functional.cross_entropy(random_logits.view(-1, num_labels), random_labels.view(-1))
        model_output = SequenceClassifierOutput(logits=random_logits)
        label_smoothed_loss = LabelSmoother(epsilon)(model_output, random_labels)
        log_probs = -nn.functional.log_softmax(random_logits, dim=-1)
        # Mask the log probs with the -100 labels
        log_probs[0, 1] = 0.0
        log_probs[2, 1] = 0.0
        log_probs[2, 3] = 0.0
        expected_loss = (1 - epsilon) * loss + epsilon * log_probs.sum() / (num_labels * 17)
        torch.testing.assert_close(label_smoothed_loss, expected_loss)

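    # Worked check for the masked case above (explanatory note): the label tensor has
    # 4 * 5 = 20 positions and 3 are set to -100, leaving 17 active positions, so the
    # smoothing term normalizes the summed negative log-probs by num_labels * 17:
    #
    #     expected = (1 - epsilon) * ce_loss + epsilon * nll_sum / (num_labels * n_active)
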
    def test_group_by_length(self):
        # Get some inputs of random lengths
        lengths = torch.randint(0, 25, (100,)).tolist()
        # Put one bigger than the others to check it ends up in first position
        lengths[32] = 50

        indices = list(LengthGroupedSampler(4, lengths=lengths))
        # The biggest element should be first
        self.assertEqual(lengths[indices[0]], 50)
        # The indices should be a permutation of range(100)
        self.assertEqual(sorted(indices), list(range(100)))

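    # Explanatory note (a sketch of the sampler's mechanics, not an exhaustive spec):
    # LengthGroupedSampler shuffles indices into megabatches of roughly 50 * batch_size
    # elements, sorts each megabatch by length, and moves the longest sample to the very
    # first position so an out-of-memory failure surfaces on the first batch rather than
    # mid-training; hence lengths[indices[0]] == 50 in the test above.
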
    def test_group_by_length_with_dict(self):
        # Get some inputs of random lengths
        data = []
        for _ in range(6):
            input_ids = torch.randint(0, 25, (100,)).tolist()
            data.append({"input_ids": input_ids})
        # Put one bigger than the others to check it ends up in first position
        data[3]["input_ids"] = torch.randint(0, 25, (105,)).tolist()

        indices = list(LengthGroupedSampler(4, dataset=data))
        # The biggest element should be first
        self.assertEqual(len(data[indices[0]]["input_ids"]), 105)
        # The indices should be a permutation of range(6)
        self.assertEqual(sorted(indices), list(range(6)))

    def test_group_by_length_with_batch_encoding(self):
        # Get some inputs of random lengths
        data = []
        for _ in range(6):
            input_ids = torch.randint(0, 25, (100,)).tolist()
            data.append(BatchEncoding({"input_ids": input_ids}))
        # Put one bigger than the others to check it ends up in first position
        data[3]["input_ids"] = torch.randint(0, 25, (105,)).tolist()

        indices = list(LengthGroupedSampler(4, dataset=data))
        # The biggest element should be first
        self.assertEqual(len(data[indices[0]]["input_ids"]), 105)
        # The indices should be a permutation of range(6)
        self.assertEqual(sorted(indices), list(range(6)))

    def test_distributed_length_grouped(self):
        # Get some inputs of random lengths
        lengths = torch.randint(0, 25, (100,)).tolist()
        # Put one bigger than the others to check it ends up in first position
        lengths[32] = 50

        indices_process_0 = list(DistributedLengthGroupedSampler(4, num_replicas=2, rank=0, lengths=lengths))
        indices_process_1 = list(DistributedLengthGroupedSampler(4, num_replicas=2, rank=1, lengths=lengths))
        # The biggest element should be first
        self.assertEqual(lengths[indices_process_0[0]], 50)
        # The indices should be a permutation of range(100)
        self.assertEqual(sorted(indices_process_0 + indices_process_1), list(range(100)))

    def test_get_parameter_names(self):
        model = nn.Sequential(TstLayer(128), nn.ModuleList([TstLayer(128), TstLayer(128)]))
        # fmt: off
        self.assertEqual(
            get_parameter_names(model, [nn.LayerNorm]),
            ['0.linear1.weight', '0.linear1.bias', '0.linear2.weight', '0.linear2.bias', '0.bias', '1.0.linear1.weight', '1.0.linear1.bias', '1.0.linear2.weight', '1.0.linear2.bias', '1.0.bias', '1.1.linear1.weight', '1.1.linear1.bias', '1.1.linear2.weight', '1.1.linear2.bias', '1.1.bias']
        )
        # fmt: on

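    # Typical downstream use (an illustrative sketch, not part of the test suite; the
    # 0.01 decay value is a placeholder): Trainer builds optimizer parameter groups
    # from this helper, excluding LayerNorm parameters and biases from weight decay,
    # roughly like:
    #
    #     decay_parameters = [n for n in get_parameter_names(model, [nn.LayerNorm]) if "bias" not in n]
    #     grouped = [
    #         {"params": [p for n, p in model.named_parameters() if n in decay_parameters], "weight_decay": 0.01},
    #         {"params": [p for n, p in model.named_parameters() if n not in decay_parameters], "weight_decay": 0.0},
    #     ]
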
    def test_get_parameter_names_rmsnorm(self):
        class RMSNorm(nn.Module):
            def __init__(self, hidden_size):
                super().__init__()
                self.weight = nn.Parameter(torch.ones(hidden_size))
                self.bias = nn.Parameter(torch.zeros(hidden_size))

        class ModelWithRMSNorm(nn.Module):
            def __init__(self):
                super().__init__()
                self.linear = nn.Linear(128, 128)
                self.rmsnorm = RMSNorm(128)
                self.bias = nn.Parameter(torch.zeros(128))

        model = ModelWithRMSNorm()
        # Test both type-based and name-based filtering
        decay_parameters = get_parameter_names(model, [], ["bias", "rmsnorm"])

        # Parameters that should be in weight decay
        self.assertIn("linear.weight", decay_parameters)

        # Parameters that should NOT be in weight decay
        self.assertNotIn("linear.bias", decay_parameters)
        self.assertNotIn("rmsnorm.weight", decay_parameters)
        self.assertNotIn("rmsnorm.bias", decay_parameters)
        self.assertNotIn("bias", decay_parameters)

    def test_distributed_sampler_with_loop(self):
        batch_size = 16
        for length in [23, 64, 123]:
            dataset = list(range(length))
            shard1 = DistributedSamplerWithLoop(dataset, batch_size, num_replicas=2, rank=0)
            shard2 = DistributedSamplerWithLoop(dataset, batch_size, num_replicas=2, rank=1)

            # Set seeds
            shard1.set_epoch(0)
            shard2.set_epoch(0)

            # Sample
            samples1 = list(shard1)
            samples2 = list(shard2)

            self.assertTrue(len(samples1) % batch_size == 0)
            self.assertTrue(len(samples2) % batch_size == 0)

            total = []
            for sample1, sample2 in zip(samples1, samples2):
                total += [sample1, sample2]

            self.assertEqual(set(total[:length]), set(dataset))
            self.assertEqual(set(total[length:]), set(total[: (len(total) - length)]))

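    # Explanatory note: unlike a plain DistributedSampler, DistributedSamplerWithLoop
    # tops up the last batch of each shard by wrapping around to the start of the
    # shuffled samples instead of padding or dropping, so every shard length is a
    # multiple of batch_size and the wrapped tail repeats samples already seen (the
    # final assertion above).
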
    def test_sequential_distributed_sampler(self):
        batch_size = 16
        for length in [23, 64, 123]:
            dataset = list(range(length))
            shard1 = SequentialDistributedSampler(dataset, num_replicas=2, rank=0)
            shard2 = SequentialDistributedSampler(dataset, num_replicas=2, rank=1)

            # Sample
            samples1 = list(shard1)
            samples2 = list(shard2)

            total = samples1 + samples2

            self.assertListEqual(total[:length], dataset)
            self.assertListEqual(total[length:], dataset[: (len(total) - length)])

            # With a batch_size passed
            shard1 = SequentialDistributedSampler(dataset, num_replicas=2, rank=0, batch_size=batch_size)
            shard2 = SequentialDistributedSampler(dataset, num_replicas=2, rank=1, batch_size=batch_size)

            # Sample
            samples1 = list(shard1)
            samples2 = list(shard2)

            self.assertTrue(len(samples1) % batch_size == 0)
            self.assertTrue(len(samples2) % batch_size == 0)

            total = samples1 + samples2

            self.assertListEqual(total[:length], dataset)
            self.assertListEqual(total[length:], dataset[: (len(total) - length)])

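    # Explanatory note: SequentialDistributedSampler splits the dataset into contiguous
    # (not interleaved) shards, so concatenating rank 0's samples followed by rank 1's
    # restores dataset order; any padding needed to even out the shards is taken from
    # the start of the dataset, which the tail assertions above verify.
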
    def check_iterable_dataset_shard(self, dataset, batch_size, drop_last, num_processes=2, epoch=0):
        # Set the seed for the base dataset to get the proper reference.
        dataset.generator.manual_seed(epoch)
        reference = list(dataset)

        shards = [
            IterableDatasetShard(
                dataset, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i
            )
            for i in range(num_processes)
        ]
        for shard in shards:
            shard.set_epoch(epoch)
        shard_lists = [list(shard) for shard in shards]

        for shard in shard_lists:
            # All shards have a number of samples that is a round multiple of batch size
            self.assertTrue(len(shard) % batch_size == 0)
            # All shards have the same number of samples
            self.assertEqual(len(shard), len(shard_lists[0]))

        for shard in shards:
            # All shards know the total number of samples
            self.assertEqual(shard.num_examples, len(reference))

        observed = []
        for idx in range(0, len(shard_lists[0]), batch_size):
            for shard in shard_lists:
                observed += shard[idx : idx + batch_size]

        # If drop_last is False we loop through samples at the beginning to have a size that is a round multiple of
        # batch_size
        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])

        # Check equivalence between IterableDataset and ShardSampler
        dataset.generator.manual_seed(epoch)
        reference = list(dataset)

        sampler_shards = [
            ShardSampler(
                reference, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i
            )
            for i in range(num_processes)
        ]
        for shard, sampler_shard in zip(shard_lists, sampler_shards):
            self.assertListEqual(shard, list(sampler_shard))

    def test_iterable_dataset_shard(self):
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shard(dataset, 4, drop_last=True, num_processes=2, epoch=0)
        self.check_iterable_dataset_shard(dataset, 4, drop_last=False, num_processes=2, epoch=0)

        self.check_iterable_dataset_shard(dataset, 4, drop_last=True, num_processes=3, epoch=42)
        self.check_iterable_dataset_shard(dataset, 4, drop_last=False, num_processes=3, epoch=42)

    def test_iterable_dataset_shard_with_length(self):
        sampler_shards = [
            IterableDatasetShard(list(range(100)), batch_size=4, drop_last=True, num_processes=2, process_index=i)
            for i in range(2)
        ]

        # Build expected shards: each process will have batches of size 4 until there are not enough elements left
        # to form two full batches, so we stop at 96 = (100 // (4 * 2)) * (4 * 2)
        expected_shards = [[], []]
        current_shard = 0
        for i in range(0, 96, 4):
            expected_shards[current_shard].extend(list(range(i, i + 4)))
            current_shard = 1 - current_shard

        self.assertListEqual([list(shard) for shard in sampler_shards], expected_shards)
        self.assertListEqual([len(shard) for shard in sampler_shards], [len(shard) for shard in expected_shards])

        sampler_shards = [
            IterableDatasetShard(list(range(100)), batch_size=4, drop_last=False, num_processes=2, process_index=i)
            for i in range(2)
        ]
        # When drop_last=False, we get the two last full batches by looping back to the beginning.
        expected_shards[0].extend(list(range(96, 100)))
        expected_shards[1].extend(list(range(0, 4)))

        self.assertListEqual([list(shard) for shard in sampler_shards], expected_shards)
        self.assertListEqual([len(shard) for shard in sampler_shards], [len(shard) for shard in expected_shards])

    def check_shard_sampler(self, dataset, batch_size, drop_last, num_processes=2):
        shards = [
            ShardSampler(
                dataset, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i
            )
            for i in range(num_processes)
        ]
        shard_lists = [list(shard) for shard in shards]

        for shard in shard_lists:
            # All shards have a number of samples that is a round multiple of batch size
            self.assertTrue(len(shard) % batch_size == 0)
            # All shards have the same number of samples
            self.assertEqual(len(shard), len(shard_lists[0]))

        observed = []
        for idx in range(0, len(shard_lists[0]), batch_size):
            for shard in shard_lists:
                observed += shard[idx : idx + batch_size]

        # If drop_last is False we loop through samples at the beginning to have a size that is a round multiple of
        # batch_size
        reference = copy.copy(dataset)
        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])

    def test_shard_sampler(self):
        for n_elements in [64, 123]:
            dataset = list(range(n_elements))

            self.check_shard_sampler(dataset, 4, drop_last=True, num_processes=2)
            self.check_shard_sampler(dataset, 4, drop_last=False, num_processes=2)

            self.check_shard_sampler(dataset, 4, drop_last=True, num_processes=3)
            self.check_shard_sampler(dataset, 4, drop_last=False, num_processes=3)

    @require_accelerate
    def test_executable_batch_size(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=64, auto_find_batch_size=True)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size > 16:
                raise RuntimeError("CUDA out of memory.")

        mock_training_loop_function()
        self.assertEqual(batch_sizes, [64, 57, 51, 45, 40, 36, 32, 28, 25, 22, 19, 17, 15])

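    # Worked trace of the expected sequence (explanatory note): on each simulated OOM,
    # the batch size shrinks by a factor of 0.9, truncated to int, as the asserted list
    # shows: 64 -> 57 -> 51 -> 45 -> 40 -> 36 -> 32 -> 28 -> 25 -> 22 -> 19 -> 17 -> 15,
    # and 15 <= 16 finally runs without raising.
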
    @require_accelerate
    def test_executable_batch_size_no_search(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=64, auto_find_batch_size=False)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)

        mock_training_loop_function()
        self.assertEqual(batch_sizes, [64])

    @require_accelerate
    def test_executable_batch_size_with_error(self):
        @find_executable_batch_size(starting_batch_size=64, auto_find_batch_size=False)
        def mock_training_loop_function(batch_size):
            raise RuntimeError("CUDA out of memory.")

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        # The raised exception lives on the context manager's `exception` attribute,
        # and the message must match what was raised, including the trailing period.
        self.assertEqual("CUDA out of memory.", cm.exception.args[0])

    def test_pad_and_concatenate_with_1d(self):
        """Tests whether pad_and_concatenate works with scalar (0-d) inputs, promoting them to 1d."""
        array1 = 1.0
        array2 = 2.0
        result = numpy_pad_and_concatenate(array1, array2)
        self.assertTrue(np.array_equal(np.array([1.0, 2.0]), result))

        tensor1 = torch.tensor(1.0)
        tensor2 = torch.tensor(2.0)
        result = torch_pad_and_concatenate(tensor1, tensor2)
        self.assertTrue(torch.equal(result, torch.Tensor([1.0, 2.0])))

    def test_remove_columns_collator(self):
        class MockLogger:
            def __init__(self) -> None:
                self.called = 0

            def info(self, msg):
                self.called += 1
                self.last_msg = msg

        data_batch = [
            {"col1": 1, "col2": 2, "col3": 3},
            {"col1": 1, "col2": 2, "col3": 3},
        ]
        logger = MockLogger()
        remove_columns_collator = RemoveColumnsCollator(
            default_data_collator, ["col1", "col2"], logger, "model", "training"
        )

        self.assertNotIn("col3", remove_columns_collator(data_batch))
        # check that the logging message is printed out only once
        remove_columns_collator(data_batch)
        remove_columns_collator(data_batch)
        self.assertEqual(logger.called, 1)
        self.assertIn("col3", logger.last_msg)

    def test_eval_loop_container(self):
        batch_1 = [
            torch.ones([8, 5]),
            {"loss": torch.tensor(1.0)},
            (torch.ones([8, 2, 3]), torch.ones([8, 2])),
        ]
        batch_2 = [
            torch.ones([4, 5]),
            {"loss": torch.tensor(2.0)},
            (torch.ones([4, 2, 3]), torch.ones([4, 6])),
        ]

        concat_container = EvalLoopContainer(do_nested_concat=True, padding_index=-100)
        concat_container.add(batch_1)
        concat_container.add(batch_2)
        concat_container.to_cpu_and_numpy()
        arrays = concat_container.get_arrays()

        # Test two nested batches concatenation
        self.assertIsInstance(arrays, list)
        self.assertEqual(len(arrays), 3)
        self.assertIsInstance(arrays[0], np.ndarray)
        self.assertEqual(arrays[0].shape, (12, 5))
        self.assertIsInstance(arrays[1], dict)
        self.assertIsInstance(arrays[1]["loss"], np.ndarray)
        self.assertEqual(arrays[1]["loss"].shape, (2,))
        self.assertTrue(np.allclose(arrays[1]["loss"], np.array([1.0, 2.0])))
        self.assertIsInstance(arrays[2], tuple)
        self.assertEqual(len(arrays[2]), 2)
        self.assertEqual(arrays[2][0].shape, (12, 2, 3))
        self.assertEqual(arrays[2][1].shape, (12, 6))
        # check that the first batch was padded with the padding index -100 after concatenation
        self.assertEqual(arrays[2][1][0][2], -100)

        # Test two batches with no concatenation
        list_container = EvalLoopContainer(do_nested_concat=False)
        list_container.add(batch_1)
        list_container.add(batch_2)
        list_container.to_cpu_and_numpy()
        arrays = list_container.get_arrays()

        self.assertEqual(len(arrays), 2)
        self.assertIsInstance(arrays, list)
        np_batch_1, np_batch_2 = arrays

        self.assertIsInstance(np_batch_1, list)
        self.assertEqual(len(np_batch_1), 3)
        self.assertIsInstance(np_batch_1[0], np.ndarray)
        self.assertIsInstance(np_batch_1[1], dict)
        self.assertIsInstance(np_batch_1[2], tuple)
        self.assertEqual(np_batch_1[0].shape, (8, 5))
        self.assertEqual(np_batch_1[1]["loss"].shape, ())
        self.assertEqual(np_batch_1[2][0].shape, (8, 2, 3))
        self.assertEqual(np_batch_1[2][1].shape, (8, 2))

        self.assertIsInstance(np_batch_2, list)
        self.assertEqual(len(np_batch_2), 3)
        self.assertIsInstance(np_batch_2[0], np.ndarray)
        self.assertIsInstance(np_batch_2[1], dict)
        self.assertIsInstance(np_batch_2[2], tuple)
        self.assertEqual(np_batch_2[0].shape, (4, 5))
        self.assertEqual(np_batch_2[1]["loss"].shape, ())
        self.assertEqual(np_batch_2[2][0].shape, (4, 2, 3))
        self.assertEqual(np_batch_2[2][1].shape, (4, 6))

        # Test no batches
        none_arr = EvalLoopContainer(do_nested_concat=True, padding_index=-100).get_arrays()
        self.assertIsNone(none_arr)

        none_arr = EvalLoopContainer(do_nested_concat=False).get_arrays()
        self.assertIsNone(none_arr)

        # Test one batch
        concat_container = EvalLoopContainer(do_nested_concat=True, padding_index=-100)
        concat_container.add(batch_1)
        arrays = concat_container.get_arrays()
        self.assertIsInstance(arrays, list)
        self.assertEqual(len(arrays), 3)
        self.assertIsInstance(arrays[0], np.ndarray)
        self.assertEqual(arrays[0].shape, (8, 5))
        self.assertIsInstance(arrays[1], dict)
        self.assertIsInstance(arrays[1]["loss"], np.ndarray)
        self.assertEqual(arrays[1]["loss"].shape, ())
        self.assertTrue(np.allclose(arrays[1]["loss"], np.array([1.0])))
        self.assertIsInstance(arrays[2], tuple)
        self.assertEqual(len(arrays[2]), 2)
        self.assertEqual(arrays[2][0].shape, (8, 2, 3))
        self.assertEqual(arrays[2][1].shape, (8, 2))

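    # Explanatory note: with do_nested_concat=True the container pads mismatched
    # trailing dimensions with padding_index before concatenating, e.g. the (8, 2) and
    # (4, 6) tensors above become a single (12, 6) array whose first eight rows are
    # -100-padded beyond column 2 (checked via arrays[2][1][0][2] == -100).
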
    def test_label_smoothing_multi_label_incompatibility(self):
        """Test that Trainer warns and disables label smoothing for multi-label classification"""

        # Mock model config with multi-label classification
        class MockConfig:
            problem_type = "multi_label_classification"

        class MockModel(nn.Module):
            def __init__(self):
                super().__init__()
                self.config = MockConfig()
                self.linear = nn.Linear(10, 3)

            def forward(self, **kwargs):
                return {"logits": torch.randn(2, 3)}

        model = MockModel()

        # Create training args with label smoothing
        training_args = TrainingArguments(
            output_dir="./test-trainer",
            label_smoothing_factor=0.1,
            per_device_train_batch_size=2,
            num_train_epochs=1,
        )

        # Should warn and disable label smoothing
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            trainer = Trainer(model=model, args=training_args)

        # Check warning was issued
        self.assertEqual(len(w), 1)
        self.assertIn("Label smoothing is not compatible with multi-label classification", str(w[0].message))

        # Check label_smoother was disabled
        self.assertIsNone(trainer.label_smoother)