{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 39,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07920792079207921,
      "grad_norm": 1.8944026231765747,
      "learning_rate": 0.0,
      "loss": 2.8705,
      "step": 1
    },
    {
      "epoch": 0.15841584158415842,
      "grad_norm": 1.8068678379058838,
      "learning_rate": 2e-05,
      "loss": 2.6876,
      "step": 2
    },
    {
      "epoch": 0.2376237623762376,
      "grad_norm": 1.9159170389175415,
      "learning_rate": 4e-05,
      "loss": 2.8577,
      "step": 3
    },
    {
      "epoch": 0.31683168316831684,
      "grad_norm": 1.672847867012024,
      "learning_rate": 6e-05,
      "loss": 2.7788,
      "step": 4
    },
    {
      "epoch": 0.39603960396039606,
      "grad_norm": 1.6339856386184692,
      "learning_rate": 8e-05,
      "loss": 2.688,
      "step": 5
    },
    {
      "epoch": 0.4752475247524752,
      "grad_norm": 1.4535776376724243,
      "learning_rate": 0.0001,
      "loss": 2.5674,
      "step": 6
    },
    {
      "epoch": 0.5544554455445545,
      "grad_norm": 1.4043422937393188,
      "learning_rate": 9.705882352941177e-05,
      "loss": 2.5515,
      "step": 7
    },
    {
      "epoch": 0.6336633663366337,
      "grad_norm": 1.38013756275177,
      "learning_rate": 9.411764705882353e-05,
      "loss": 2.478,
      "step": 8
    },
    {
      "epoch": 0.7128712871287128,
      "grad_norm": 1.188214898109436,
      "learning_rate": 9.11764705882353e-05,
      "loss": 2.3253,
      "step": 9
    },
    {
      "epoch": 0.7920792079207921,
      "grad_norm": 1.2095648050308228,
      "learning_rate": 8.823529411764706e-05,
      "loss": 2.2612,
      "step": 10
    },
    {
      "epoch": 0.8712871287128713,
      "grad_norm": 1.3112112283706665,
      "learning_rate": 8.529411764705883e-05,
      "loss": 2.2516,
      "step": 11
    },
    {
      "epoch": 0.9504950495049505,
      "grad_norm": 1.1764588356018066,
      "learning_rate": 8.23529411764706e-05,
      "loss": 2.2492,
      "step": 12
    },
    {
      "epoch": 1.0,
      "grad_norm": 1.4241539239883423,
      "learning_rate": 7.941176470588235e-05,
      "loss": 2.2301,
      "step": 13
    },
    {
      "epoch": 1.0792079207920793,
      "grad_norm": 1.2298003435134888,
      "learning_rate": 7.647058823529411e-05,
      "loss": 2.3024,
      "step": 14
    },
    {
      "epoch": 1.1584158415841583,
      "grad_norm": 1.0761587619781494,
      "learning_rate": 7.352941176470589e-05,
      "loss": 2.1472,
      "step": 15
    },
    {
      "epoch": 1.2376237623762376,
      "grad_norm": 1.0203983783721924,
      "learning_rate": 7.058823529411765e-05,
      "loss": 2.1082,
      "step": 16
    },
    {
      "epoch": 1.316831683168317,
      "grad_norm": 1.159624457359314,
      "learning_rate": 6.764705882352942e-05,
      "loss": 2.173,
      "step": 17
    },
    {
      "epoch": 1.396039603960396,
      "grad_norm": 1.0887689590454102,
      "learning_rate": 6.470588235294118e-05,
      "loss": 2.0336,
      "step": 18
    },
    {
      "epoch": 1.4752475247524752,
      "grad_norm": 1.0648787021636963,
      "learning_rate": 6.176470588235295e-05,
      "loss": 1.989,
      "step": 19
    },
    {
      "epoch": 1.5544554455445545,
      "grad_norm": 1.1488330364227295,
      "learning_rate": 5.882352941176471e-05,
      "loss": 2.0637,
      "step": 20
    },
    {
      "epoch": 1.6336633663366338,
      "grad_norm": 1.0536024570465088,
      "learning_rate": 5.588235294117647e-05,
      "loss": 2.1311,
      "step": 21
    },
    {
      "epoch": 1.7128712871287128,
      "grad_norm": 1.1403701305389404,
      "learning_rate": 5.294117647058824e-05,
      "loss": 2.0831,
      "step": 22
    },
    {
      "epoch": 1.7920792079207921,
      "grad_norm": 1.107954740524292,
      "learning_rate": 5e-05,
      "loss": 2.09,
      "step": 23
    },
    {
      "epoch": 1.8712871287128712,
      "grad_norm": 1.0495295524597168,
      "learning_rate": 4.705882352941177e-05,
      "loss": 2.0811,
      "step": 24
    },
    {
      "epoch": 1.9504950495049505,
      "grad_norm": 1.0435138940811157,
      "learning_rate": 4.411764705882353e-05,
      "loss": 2.0862,
      "step": 25
    },
    {
      "epoch": 2.0,
      "grad_norm": 1.3227992057800293,
      "learning_rate": 4.11764705882353e-05,
      "loss": 1.9347,
      "step": 26
    },
    {
      "epoch": 2.0792079207920793,
      "grad_norm": 0.9908578395843506,
      "learning_rate": 3.8235294117647055e-05,
      "loss": 2.0467,
      "step": 27
    },
    {
      "epoch": 2.1584158415841586,
      "grad_norm": 1.0480220317840576,
      "learning_rate": 3.529411764705883e-05,
      "loss": 1.967,
      "step": 28
    },
    {
      "epoch": 2.237623762376238,
      "grad_norm": 1.0408047437667847,
      "learning_rate": 3.235294117647059e-05,
      "loss": 1.9491,
      "step": 29
    },
    {
      "epoch": 2.3168316831683167,
      "grad_norm": 1.163356065750122,
      "learning_rate": 2.9411764705882354e-05,
      "loss": 1.9574,
      "step": 30
    },
    {
      "epoch": 2.396039603960396,
      "grad_norm": 1.0859148502349854,
      "learning_rate": 2.647058823529412e-05,
      "loss": 1.8265,
      "step": 31
    },
    {
      "epoch": 2.4752475247524752,
      "grad_norm": 1.0860532522201538,
      "learning_rate": 2.3529411764705884e-05,
      "loss": 1.8592,
      "step": 32
    },
    {
      "epoch": 2.5544554455445545,
      "grad_norm": 1.0774939060211182,
      "learning_rate": 2.058823529411765e-05,
      "loss": 1.9709,
      "step": 33
    },
    {
      "epoch": 2.633663366336634,
      "grad_norm": 1.0513403415679932,
      "learning_rate": 1.7647058823529414e-05,
      "loss": 1.842,
      "step": 34
    },
    {
      "epoch": 2.7128712871287126,
      "grad_norm": 1.0179636478424072,
      "learning_rate": 1.4705882352941177e-05,
      "loss": 1.9262,
      "step": 35
    },
    {
      "epoch": 2.792079207920792,
      "grad_norm": 1.1298041343688965,
      "learning_rate": 1.1764705882352942e-05,
      "loss": 1.9929,
      "step": 36
    },
    {
      "epoch": 2.871287128712871,
      "grad_norm": 1.1205837726593018,
      "learning_rate": 8.823529411764707e-06,
      "loss": 1.8582,
      "step": 37
    },
    {
      "epoch": 2.9504950495049505,
      "grad_norm": 1.1293277740478516,
      "learning_rate": 5.882352941176471e-06,
      "loss": 1.9963,
      "step": 38
    },
    {
      "epoch": 3.0,
      "grad_norm": 1.5077970027923584,
      "learning_rate": 2.9411764705882355e-06,
      "loss": 1.9583,
      "step": 39
    }
  ],
  "logging_steps": 1,
  "max_steps": 39,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 5000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1791632051251200.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}