{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 475,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.10614101592115238,
      "grad_norm": 6.5291595458984375,
      "learning_rate": 7.500000000000001e-06,
      "loss": 1.3263,
      "step": 10
    },
    {
      "epoch": 0.21228203184230476,
      "grad_norm": 1.4655342102050781,
      "learning_rate": 1.5833333333333333e-05,
      "loss": 1.1625,
      "step": 20
    },
    {
      "epoch": 0.31842304776345715,
      "grad_norm": 1.383467197418213,
      "learning_rate": 2.4166666666666667e-05,
      "loss": 1.0477,
      "step": 30
    },
    {
      "epoch": 0.4245640636846095,
      "grad_norm": 1.4785548448562622,
      "learning_rate": 3.2500000000000004e-05,
      "loss": 1.0062,
      "step": 40
    },
    {
      "epoch": 0.530705079605762,
      "grad_norm": 1.4572118520736694,
      "learning_rate": 3.999945869500297e-05,
      "loss": 1.0003,
      "step": 50
    },
    {
      "epoch": 0.6368460955269143,
      "grad_norm": 1.7819571495056152,
      "learning_rate": 3.9934537542218075e-05,
      "loss": 0.9682,
      "step": 60
    },
    {
      "epoch": 0.7429871114480667,
      "grad_norm": 1.0432629585266113,
      "learning_rate": 3.9761757921821544e-05,
      "loss": 0.9887,
      "step": 70
    },
    {
      "epoch": 0.849128127369219,
      "grad_norm": 2.0072572231292725,
      "learning_rate": 3.948205468093744e-05,
      "loss": 0.9882,
      "step": 80
    },
    {
      "epoch": 0.9552691432903715,
      "grad_norm": 1.6535046100616455,
      "learning_rate": 3.909694119116433e-05,
      "loss": 0.9592,
      "step": 90
    },
    {
      "epoch": 1.0530705079605762,
      "grad_norm": 1.6055387258529663,
      "learning_rate": 3.860850116027705e-05,
      "loss": 0.9474,
      "step": 100
    },
    {
      "epoch": 1.1592115238817287,
      "grad_norm": 1.1085819005966187,
      "learning_rate": 3.801937735804838e-05,
      "loss": 0.9273,
      "step": 110
    },
    {
      "epoch": 1.265352539802881,
      "grad_norm": 1.5138670206069946,
      "learning_rate": 3.7332757317191726e-05,
      "loss": 0.8991,
      "step": 120
    },
    {
      "epoch": 1.3714935557240333,
      "grad_norm": 1.4127428531646729,
      "learning_rate": 3.6552356086791176e-05,
      "loss": 0.9057,
      "step": 130
    },
    {
      "epoch": 1.4776345716451857,
      "grad_norm": 0.9714298844337463,
      "learning_rate": 3.568239613153421e-05,
      "loss": 0.8885,
      "step": 140
    },
    {
      "epoch": 1.5837755875663382,
      "grad_norm": 0.8646650910377502,
      "learning_rate": 3.472758448550471e-05,
      "loss": 0.9101,
      "step": 150
    },
    {
      "epoch": 1.6899166034874904,
      "grad_norm": 0.6955054402351379,
      "learning_rate": 3.3693087284148765e-05,
      "loss": 0.9119,
      "step": 160
    },
    {
      "epoch": 1.7960576194086428,
      "grad_norm": 0.8518441915512085,
      "learning_rate": 3.258450181221154e-05,
      "loss": 0.88,
      "step": 170
    },
    {
      "epoch": 1.9021986353297953,
      "grad_norm": 0.9373747110366821,
      "learning_rate": 3.140782621888343e-05,
      "loss": 0.8962,
      "step": 180
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.5835213661193848,
      "learning_rate": 3.0169427064015813e-05,
      "loss": 0.9049,
      "step": 190
    },
    {
      "epoch": 2.1061410159211524,
      "grad_norm": 0.4349837601184845,
      "learning_rate": 2.887600487100196e-05,
      "loss": 0.8462,
      "step": 200
    },
    {
      "epoch": 2.212282031842305,
      "grad_norm": 0.40261805057525635,
      "learning_rate": 2.7534557872703705e-05,
      "loss": 0.8301,
      "step": 210
    },
    {
      "epoch": 2.3184230477634573,
      "grad_norm": 0.3540641665458679,
      "learning_rate": 2.615234414658145e-05,
      "loss": 0.8281,
      "step": 220
    },
    {
      "epoch": 2.4245640636846097,
      "grad_norm": 0.3487437069416046,
      "learning_rate": 2.4736842343900386e-05,
      "loss": 0.8388,
      "step": 230
    },
    {
      "epoch": 2.530705079605762,
      "grad_norm": 0.5164170265197754,
      "learning_rate": 2.3295711225492847e-05,
      "loss": 0.8335,
      "step": 240
    },
    {
      "epoch": 2.636846095526914,
      "grad_norm": 0.34246334433555603,
      "learning_rate": 2.1836748223013785e-05,
      "loss": 0.8491,
      "step": 250
    },
    {
      "epoch": 2.7429871114480666,
      "grad_norm": 0.4085201621055603,
      "learning_rate": 2.0367847249899443e-05,
      "loss": 0.8155,
      "step": 260
    },
    {
      "epoch": 2.849128127369219,
      "grad_norm": 0.3134537637233734,
      "learning_rate": 1.8896955990298364e-05,
      "loss": 0.823,
      "step": 270
    },
    {
      "epoch": 2.9552691432903715,
      "grad_norm": 0.3039611279964447,
      "learning_rate": 1.743203289706898e-05,
      "loss": 0.8266,
      "step": 280
    },
    {
      "epoch": 3.053070507960576,
      "grad_norm": 0.24728739261627197,
      "learning_rate": 1.5981004131511497e-05,
      "loss": 0.7984,
      "step": 290
    },
    {
      "epoch": 3.1592115238817287,
      "grad_norm": 0.20878377556800842,
      "learning_rate": 1.455172067781763e-05,
      "loss": 0.7883,
      "step": 300
    },
    {
      "epoch": 3.265352539802881,
      "grad_norm": 0.1705101877450943,
      "learning_rate": 1.3151915864276115e-05,
      "loss": 0.7863,
      "step": 310
    },
    {
      "epoch": 3.3714935557240335,
      "grad_norm": 0.15946735441684723,
      "learning_rate": 1.1789163521071099e-05,
      "loss": 0.7712,
      "step": 320
    },
    {
      "epoch": 3.4776345716451855,
      "grad_norm": 0.19221286475658417,
      "learning_rate": 1.0470837001066219e-05,
      "loss": 0.7943,
      "step": 330
    },
    {
      "epoch": 3.583775587566338,
      "grad_norm": 0.19959454238414764,
      "learning_rate": 9.204069285297936e-06,
      "loss": 0.7896,
      "step": 340
    },
    {
      "epoch": 3.6899166034874904,
      "grad_norm": 0.13320986926555634,
      "learning_rate": 7.995714389032638e-06,
      "loss": 0.7797,
      "step": 350
    },
    {
      "epoch": 3.796057619408643,
      "grad_norm": 0.16105031967163086,
      "learning_rate": 6.852310277205116e-06,
      "loss": 0.801,
      "step": 360
    },
    {
      "epoch": 3.9021986353297953,
      "grad_norm": 0.1884138584136963,
      "learning_rate": 5.780043489889415e-06,
      "loss": 0.7722,
      "step": 370
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.16600456833839417,
      "learning_rate": 4.784715669200672e-06,
      "loss": 0.7575,
      "step": 380
    },
    {
      "epoch": 4.106141015921152,
      "grad_norm": 0.08295275270938873,
      "learning_rate": 3.8717121687385575e-06,
      "loss": 0.7798,
      "step": 390
    },
    {
      "epoch": 4.212282031842305,
      "grad_norm": 0.10298614203929901,
      "learning_rate": 3.0459729154151095e-06,
      "loss": 0.7812,
      "step": 400
    },
    {
      "epoch": 4.318423047763457,
      "grad_norm": 0.09198899567127228,
      "learning_rate": 2.311965681322943e-06,
      "loss": 0.7591,
      "step": 410
    },
    {
      "epoch": 4.42456406368461,
      "grad_norm": 0.08718331158161163,
      "learning_rate": 1.6736619102599073e-06,
      "loss": 0.7507,
      "step": 420
    },
    {
      "epoch": 4.530705079605762,
      "grad_norm": 0.09788376837968826,
      "learning_rate": 1.1345152297040273e-06,
      "loss": 0.7636,
      "step": 430
    },
    {
      "epoch": 4.636846095526915,
      "grad_norm": 0.07903146743774414,
      "learning_rate": 6.974427645025427e-07,
      "loss": 0.746,
      "step": 440
    },
    {
      "epoch": 4.742987111448067,
      "grad_norm": 0.08080583065748215,
      "learning_rate": 3.648093533798092e-07,
      "loss": 0.7581,
      "step": 450
    },
    {
      "epoch": 4.8491281273692195,
      "grad_norm": 0.07727949321269989,
      "learning_rate": 1.3841475366273228e-07,
      "loss": 0.7594,
      "step": 460
    },
    {
      "epoch": 4.955269143290371,
      "grad_norm": 0.0837486982345581,
      "learning_rate": 1.948390345430484e-08,
      "loss": 0.7481,
      "step": 470
    },
    {
      "epoch": 5.0,
      "step": 475,
      "total_flos": 3.54463086751976e+19,
      "train_loss": 0.865001671439723,
      "train_runtime": 46695.3235,
      "train_samples_per_second": 1.271,
      "train_steps_per_second": 0.01
    }
  ],
  "logging_steps": 10,
  "max_steps": 475,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 300,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.54463086751976e+19,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}