{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.4536757735068517,
  "eval_steps": 500,
  "global_step": 2109,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0006894768594329053,
      "grad_norm": 3.163306713104248,
      "learning_rate": 0.0,
      "loss": 1.2218,
      "step": 1
    },
    {
      "epoch": 0.03447384297164526,
      "grad_norm": 1.9950363636016846,
      "learning_rate": 1.9999434046461045e-05,
      "loss": 1.067,
      "step": 50
    },
    {
      "epoch": 0.06894768594329052,
      "grad_norm": 1.764167308807373,
      "learning_rate": 1.996249692618611e-05,
      "loss": 0.8991,
      "step": 100
    },
    {
      "epoch": 0.10342152891493579,
      "grad_norm": 1.6243094205856323,
      "learning_rate": 1.9868053167196865e-05,
      "loss": 0.8564,
      "step": 150
    },
    {
      "epoch": 0.13789537188658105,
      "grad_norm": 1.573244333267212,
      "learning_rate": 1.971664792831919e-05,
      "loss": 0.836,
      "step": 200
    },
    {
      "epoch": 0.17236921485822632,
      "grad_norm": 1.599184513092041,
      "learning_rate": 1.9509155167802316e-05,
      "loss": 0.8218,
      "step": 250
    },
    {
      "epoch": 0.20684305782987158,
      "grad_norm": 1.5806775093078613,
      "learning_rate": 1.9246772598559302e-05,
      "loss": 0.8042,
      "step": 300
    },
    {
      "epoch": 0.24131690080151685,
      "grad_norm": 1.5832141637802124,
      "learning_rate": 1.8931014774594656e-05,
      "loss": 0.793,
      "step": 350
    },
    {
      "epoch": 0.2757907437731621,
      "grad_norm": 1.4803227186203003,
      "learning_rate": 1.8563704348526337e-05,
      "loss": 0.7826,
      "step": 400
    },
    {
      "epoch": 0.3102645867448074,
      "grad_norm": 1.4616361856460571,
      "learning_rate": 1.8146961550666525e-05,
      "loss": 0.7661,
      "step": 450
    },
    {
      "epoch": 0.34473842971645263,
      "grad_norm": 1.559212565422058,
      "learning_rate": 1.7683191950391142e-05,
      "loss": 0.7585,
      "step": 500
    },
    {
      "epoch": 0.37921227268809793,
      "grad_norm": 1.5652285814285278,
      "learning_rate": 1.717507257044331e-05,
      "loss": 0.7478,
      "step": 550
    },
    {
      "epoch": 0.41368611565974317,
      "grad_norm": 1.4887570142745972,
      "learning_rate": 1.6625536434323358e-05,
      "loss": 0.7484,
      "step": 600
    },
    {
      "epoch": 0.4481599586313884,
      "grad_norm": 1.517269492149353,
      "learning_rate": 1.6037755635962587e-05,
      "loss": 0.7409,
      "step": 650
    },
    {
      "epoch": 0.4826338016030337,
      "grad_norm": 1.4647221565246582,
      "learning_rate": 1.5415123029408046e-05,
      "loss": 0.726,
      "step": 700
    },
    {
      "epoch": 0.517107644574679,
      "grad_norm": 1.4331293106079102,
      "learning_rate": 1.4761232644210963e-05,
      "loss": 0.7315,
      "step": 750
    },
    {
      "epoch": 0.5515814875463242,
      "grad_norm": 1.432714819908142,
      "learning_rate": 1.4079858939567557e-05,
      "loss": 0.7153,
      "step": 800
    },
    {
      "epoch": 0.5860553305179695,
      "grad_norm": 1.4351866245269775,
      "learning_rate": 1.3374935016963595e-05,
      "loss": 0.7117,
      "step": 850
    },
    {
      "epoch": 0.6205291734896148,
      "grad_norm": 1.3949848413467407,
      "learning_rate": 1.2650529917086232e-05,
      "loss": 0.7136,
      "step": 900
    },
    {
      "epoch": 0.65500301646126,
      "grad_norm": 1.4337003231048584,
      "learning_rate": 1.1910825132052356e-05,
      "loss": 0.7056,
      "step": 950
    },
    {
      "epoch": 0.6894768594329053,
      "grad_norm": 1.465988278388977,
      "learning_rate": 1.1160090468532266e-05,
      "loss": 0.6969,
      "step": 1000
    },
    {
      "epoch": 0.7239507024045505,
      "grad_norm": 1.3694732189178467,
      "learning_rate": 1.0402659401094154e-05,
      "loss": 0.6891,
      "step": 1050
    },
    {
      "epoch": 0.7584245453761959,
      "grad_norm": 1.4759716987609863,
      "learning_rate": 9.642904058037667e-06,
      "loss": 0.6841,
      "step": 1100
    },
    {
      "epoch": 0.7928983883478411,
      "grad_norm": 1.4173362255096436,
      "learning_rate": 8.885209984106072e-06,
      "loss": 0.6782,
      "step": 1150
    },
    {
      "epoch": 0.8273722313194863,
      "grad_norm": 1.3540033102035522,
      "learning_rate": 8.133950825754511e-06,
      "loss": 0.6702,
      "step": 1200
    },
    {
      "epoch": 0.8618460742911316,
      "grad_norm": 1.472687840461731,
      "learning_rate": 7.393463085098886e-06,
      "loss": 0.6727,
      "step": 1250
    },
    {
      "epoch": 0.8963199172627768,
      "grad_norm": 1.4779541492462158,
      "learning_rate": 6.6680210882734805e-06,
      "loss": 0.6649,
      "step": 1300
    },
    {
      "epoch": 0.9307937602344222,
      "grad_norm": 1.4042788743972778,
      "learning_rate": 5.961812312687689e-06,
      "loss": 0.6606,
      "step": 1350
    },
    {
      "epoch": 0.9652676032060674,
      "grad_norm": 1.3935030698776245,
      "learning_rate": 5.278913215600714e-06,
      "loss": 0.6558,
      "step": 1400
    },
    {
      "epoch": 0.9997414461777127,
      "grad_norm": 1.5017318725585938,
      "learning_rate": 4.623265703539146e-06,
      "loss": 0.6622,
      "step": 1450
    },
    {
      "epoch": 1.0337843661122124,
      "grad_norm": 1.387417197227478,
      "learning_rate": 3.998654378383361e-06,
      "loss": 0.4825,
      "step": 1500
    },
    {
      "epoch": 1.0682582090838577,
      "grad_norm": 1.4467589855194092,
      "learning_rate": 3.408684691465355e-06,
      "loss": 0.4759,
      "step": 1550
    },
    {
      "epoch": 1.1027320520555028,
      "grad_norm": 1.4690955877304077,
      "learning_rate": 2.85676213177945e-06,
      "loss": 0.4703,
      "step": 1600
    },
    {
      "epoch": 1.1372058950271482,
      "grad_norm": 1.4206316471099854,
      "learning_rate": 2.3460725684379002e-06,
      "loss": 0.4714,
      "step": 1650
    },
    {
      "epoch": 1.1716797379987933,
      "grad_norm": 1.477750539779663,
      "learning_rate": 1.8795638608410016e-06,
      "loss": 0.4648,
      "step": 1700
    },
    {
      "epoch": 1.2061535809704387,
      "grad_norm": 1.3695833683013916,
      "learning_rate": 1.4599288427134283e-06,
      "loss": 0.4631,
      "step": 1750
    },
    {
      "epoch": 1.240627423942084,
      "grad_norm": 1.5380635261535645,
      "learning_rate": 1.0895897782283305e-06,
      "loss": 0.4683,
      "step": 1800
    },
    {
      "epoch": 1.2751012669137292,
      "grad_norm": 1.4506001472473145,
      "learning_rate": 7.706843799431985e-07,
      "loss": 0.463,
      "step": 1850
    },
    {
      "epoch": 1.3095751098853745,
      "grad_norm": 1.447545051574707,
      "learning_rate": 5.050534692564358e-07,
      "loss": 0.4694,
      "step": 1900
    },
    {
      "epoch": 1.3440489528570199,
      "grad_norm": 1.4041807651519775,
      "learning_rate": 2.94230350612239e-07,
      "loss": 0.4621,
      "step": 1950
    },
    {
      "epoch": 1.378522795828665,
      "grad_norm": 1.4376499652862549,
      "learning_rate": 1.3943196078924247e-07,
      "loss": 0.4698,
      "step": 2000
    },
    {
      "epoch": 1.4129966388003103,
      "grad_norm": 1.4528664350509644,
      "learning_rate": 4.155184436196669e-08,
      "loss": 0.4617,
      "step": 2050
    },
    {
      "epoch": 1.4474704817719555,
      "grad_norm": 1.4085997343063354,
      "learning_rate": 1.154995882924892e-09,
      "loss": 0.4599,
      "step": 2100
    },
    {
      "epoch": 1.4536757735068517,
      "step": 2109,
      "total_flos": 5.7510431551389696e+17,
      "train_loss": 0.659755018267263,
      "train_runtime": 7509.8532,
      "train_samples_per_second": 2.247,
      "train_steps_per_second": 0.281
    }
  ],
  "logging_steps": 50,
  "max_steps": 2109,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 2000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.7510431551389696e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}