{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.857142857142857,
  "eval_steps": 500,
  "global_step": 30,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09523809523809523,
      "grad_norm": 7.42179012298584,
      "learning_rate": 3.3333333333333333e-06,
      "loss": 1.1578,
      "step": 1
    },
    {
      "epoch": 0.19047619047619047,
      "grad_norm": 7.411426067352295,
      "learning_rate": 6.666666666666667e-06,
      "loss": 1.1462,
      "step": 2
    },
    {
      "epoch": 0.2857142857142857,
      "grad_norm": 6.784687519073486,
      "learning_rate": 1e-05,
      "loss": 1.1288,
      "step": 3
    },
    {
      "epoch": 0.38095238095238093,
      "grad_norm": 3.160092830657959,
      "learning_rate": 9.966191788709716e-06,
      "loss": 0.9638,
      "step": 4
    },
    {
      "epoch": 0.47619047619047616,
      "grad_norm": 5.82558536529541,
      "learning_rate": 9.86522435289912e-06,
      "loss": 0.9276,
      "step": 5
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 6.158791542053223,
      "learning_rate": 9.698463103929542e-06,
      "loss": 0.9728,
      "step": 6
    },
    {
      "epoch": 0.6666666666666666,
      "grad_norm": 5.891656398773193,
      "learning_rate": 9.468163201617063e-06,
      "loss": 0.938,
      "step": 7
    },
    {
      "epoch": 0.7619047619047619,
      "grad_norm": 3.9759347438812256,
      "learning_rate": 9.177439057064684e-06,
      "loss": 0.8628,
      "step": 8
    },
    {
      "epoch": 0.8571428571428571,
      "grad_norm": 3.0468554496765137,
      "learning_rate": 8.83022221559489e-06,
      "loss": 0.8703,
      "step": 9
    },
    {
      "epoch": 0.9523809523809523,
      "grad_norm": 2.659231424331665,
      "learning_rate": 8.43120818934367e-06,
      "loss": 0.8566,
      "step": 10
    },
    {
      "epoch": 1.0476190476190477,
      "grad_norm": 4.1684370040893555,
      "learning_rate": 7.985792958513932e-06,
      "loss": 1.2228,
      "step": 11
    },
    {
      "epoch": 1.1428571428571428,
      "grad_norm": 1.5882718563079834,
      "learning_rate": 7.500000000000001e-06,
      "loss": 0.8186,
      "step": 12
    },
    {
      "epoch": 1.2380952380952381,
      "grad_norm": 1.6142245531082153,
      "learning_rate": 6.980398830195785e-06,
      "loss": 0.7946,
      "step": 13
    },
    {
      "epoch": 1.3333333333333333,
      "grad_norm": 1.916320562362671,
      "learning_rate": 6.434016163555452e-06,
      "loss": 0.8539,
      "step": 14
    },
    {
      "epoch": 1.4285714285714286,
      "grad_norm": 1.4405475854873657,
      "learning_rate": 5.8682408883346535e-06,
      "loss": 0.7039,
      "step": 15
    },
    {
      "epoch": 1.5238095238095237,
      "grad_norm": 1.19035005569458,
      "learning_rate": 5.290724144552379e-06,
      "loss": 0.8027,
      "step": 16
    },
    {
      "epoch": 1.619047619047619,
      "grad_norm": 0.8802030086517334,
      "learning_rate": 4.7092758554476215e-06,
      "loss": 0.8275,
      "step": 17
    },
    {
      "epoch": 1.7142857142857144,
      "grad_norm": 1.3111690282821655,
      "learning_rate": 4.131759111665349e-06,
      "loss": 0.9132,
      "step": 18
    },
    {
      "epoch": 1.8095238095238095,
      "grad_norm": 0.9706297516822815,
      "learning_rate": 3.5659838364445505e-06,
      "loss": 0.7173,
      "step": 19
    },
    {
      "epoch": 1.9047619047619047,
      "grad_norm": 0.9016450047492981,
      "learning_rate": 3.019601169804216e-06,
      "loss": 0.856,
      "step": 20
    },
    {
      "epoch": 2.0,
      "grad_norm": 1.206610083580017,
      "learning_rate": 2.5000000000000015e-06,
      "loss": 1.0468,
      "step": 21
    },
    {
      "epoch": 2.0952380952380953,
      "grad_norm": 0.7914609313011169,
      "learning_rate": 2.0142070414860704e-06,
      "loss": 0.7511,
      "step": 22
    },
    {
      "epoch": 2.1904761904761907,
      "grad_norm": 0.8105864524841309,
      "learning_rate": 1.5687918106563326e-06,
      "loss": 0.6867,
      "step": 23
    },
    {
      "epoch": 2.2857142857142856,
      "grad_norm": 0.726883590221405,
      "learning_rate": 1.1697777844051105e-06,
      "loss": 0.7336,
      "step": 24
    },
    {
      "epoch": 2.380952380952381,
      "grad_norm": 0.621177613735199,
      "learning_rate": 8.225609429353187e-07,
      "loss": 0.8412,
      "step": 25
    },
    {
      "epoch": 2.4761904761904763,
      "grad_norm": 0.5684846639633179,
      "learning_rate": 5.318367983829393e-07,
      "loss": 0.7218,
      "step": 26
    },
    {
      "epoch": 2.571428571428571,
      "grad_norm": 0.5559888482093811,
      "learning_rate": 3.015368960704584e-07,
      "loss": 0.7787,
      "step": 27
    },
    {
      "epoch": 2.6666666666666665,
      "grad_norm": 0.5531514883041382,
      "learning_rate": 1.3477564710088097e-07,
      "loss": 0.8023,
      "step": 28
    },
    {
      "epoch": 2.761904761904762,
      "grad_norm": 0.5628388524055481,
      "learning_rate": 3.3808211290284886e-08,
      "loss": 0.7005,
      "step": 29
    },
    {
      "epoch": 2.857142857142857,
      "grad_norm": 0.5193307399749756,
      "learning_rate": 0.0,
      "loss": 0.7909,
      "step": 30
    },
    {
      "epoch": 2.857142857142857,
      "step": 30,
      "total_flos": 18265262981120.0,
      "train_loss": 0.872953059275945,
      "train_runtime": 1426.5748,
      "train_samples_per_second": 2.103,
      "train_steps_per_second": 0.021
    }
  ],
  "logging_steps": 1.0,
  "max_steps": 30,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 18265262981120.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}