{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 2017,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "entropy": 1.3586728751659394,
      "epoch": 0.02479082739386427,
      "grad_norm": 2.15625,
      "learning_rate": 7.277227722772277e-06,
      "loss": 1.4755,
      "mean_token_accuracy": 0.6909261329472065,
      "num_tokens": 12987057.0,
      "step": 50
    },
    {
      "entropy": 1.2912243032455444,
      "epoch": 0.04958165478772854,
      "grad_norm": 0.890625,
      "learning_rate": 1.4702970297029704e-05,
      "loss": 1.2929,
      "mean_token_accuracy": 0.7083150233328342,
      "num_tokens": 25982341.0,
      "step": 100
    },
    {
      "entropy": 1.195389568209648,
      "epoch": 0.07437248218159281,
      "grad_norm": 0.625,
      "learning_rate": 2.212871287128713e-05,
      "loss": 1.1604,
      "mean_token_accuracy": 0.7253781369328499,
      "num_tokens": 38965652.0,
      "step": 150
    },
    {
      "entropy": 1.130024016946554,
      "epoch": 0.09916330957545708,
      "grad_norm": 0.671875,
      "learning_rate": 2.9554455445544555e-05,
      "loss": 1.1103,
      "mean_token_accuracy": 0.7349422338604927,
      "num_tokens": 51955113.0,
      "step": 200
    },
    {
      "entropy": 1.083964577615261,
      "epoch": 0.12395413696932135,
      "grad_norm": 0.6015625,
      "learning_rate": 2.9950390604994976e-05,
      "loss": 1.0694,
      "mean_token_accuracy": 0.7426358266174793,
      "num_tokens": 64940008.0,
      "step": 250
    },
    {
      "entropy": 1.0586608110368252,
      "epoch": 0.14874496436318563,
      "grad_norm": 0.5859375,
      "learning_rate": 2.978907368725783e-05,
      "loss": 1.0479,
      "mean_token_accuracy": 0.7466910901665688,
      "num_tokens": 77929904.0,
      "step": 300
    },
    {
      "entropy": 1.0466859459877014,
      "epoch": 0.1735357917570499,
      "grad_norm": 0.6171875,
      "learning_rate": 2.9517054537806585e-05,
      "loss": 1.0374,
      "mean_token_accuracy": 0.7483436167240143,
      "num_tokens": 90917917.0,
      "step": 350
    },
    {
      "entropy": 1.0320545549690723,
      "epoch": 0.19832661915091415,
      "grad_norm": 0.6015625,
      "learning_rate": 2.9136369330613327e-05,
      "loss": 1.0239,
      "mean_token_accuracy": 0.7508709374070167,
      "num_tokens": 103911070.0,
      "step": 400
    },
    {
      "entropy": 1.0159100976586342,
      "epoch": 0.22311744654477844,
      "grad_norm": 0.5859375,
      "learning_rate": 2.864986764929012e-05,
      "loss": 1.008,
      "mean_token_accuracy": 0.754148696064949,
      "num_tokens": 116908048.0,
      "step": 450
    },
    {
      "entropy": 1.0052815113961697,
      "epoch": 0.2479082739386427,
      "grad_norm": 0.6015625,
      "learning_rate": 2.8061191156796658e-05,
      "loss": 0.9979,
      "mean_token_accuracy": 0.7558307178318501,
      "num_tokens": 129891292.0,
      "step": 500
    },
    {
      "entropy": 0.9971283619105816,
      "epoch": 0.27269910133250697,
      "grad_norm": 0.57421875,
      "learning_rate": 2.737474633611185e-05,
      "loss": 0.9897,
      "mean_token_accuracy": 0.7573503164947033,
      "num_tokens": 142876112.0,
      "step": 550
    },
    {
      "entropy": 0.9957551054656506,
      "epoch": 0.29748992872637126,
      "grad_norm": 0.57421875,
      "learning_rate": 2.6595671505916456e-05,
      "loss": 0.9886,
      "mean_token_accuracy": 0.7574590389430523,
      "num_tokens": 155871785.0,
      "step": 600
    },
    {
      "entropy": 0.9934693467617035,
      "epoch": 0.3222807561202355,
      "grad_norm": 0.5859375,
      "learning_rate": 2.5729798358187858e-05,
      "loss": 0.9863,
      "mean_token_accuracy": 0.7580893678963184,
      "num_tokens": 168864657.0,
      "step": 650
    },
    {
      "entropy": 0.9791013410687447,
      "epoch": 0.3470715835140998,
      "grad_norm": 0.58203125,
      "learning_rate": 2.478360830561368e-05,
      "loss": 0.9721,
      "mean_token_accuracy": 0.7607471886277198,
      "num_tokens": 181847527.0,
      "step": 700
    },
    {
      "entropy": 0.9810490027070046,
      "epoch": 0.37186241090796407,
      "grad_norm": 0.546875,
      "learning_rate": 2.376418396558165e-05,
      "loss": 0.974,
      "mean_token_accuracy": 0.7601690106093884,
      "num_tokens": 194832579.0,
      "step": 750
    },
    {
      "entropy": 0.9764066061377525,
      "epoch": 0.3966532383018283,
      "grad_norm": 0.60546875,
      "learning_rate": 2.2679156143907717e-05,
      "loss": 0.9697,
      "mean_token_accuracy": 0.7612905742228031,
      "num_tokens": 207819397.0,
      "step": 800
    },
    {
      "entropy": 0.9791778637468815,
      "epoch": 0.4214440656956926,
      "grad_norm": 0.53515625,
      "learning_rate": 2.153664671515084e-05,
      "loss": 0.9723,
      "mean_token_accuracy": 0.7604146462678909,
      "num_tokens": 220806448.0,
      "step": 850
    },
    {
      "entropy": 0.9723606041073799,
      "epoch": 0.4462348930895569,
      "grad_norm": 0.5234375,
      "learning_rate": 2.0345207827078517e-05,
      "loss": 0.9652,
      "mean_token_accuracy": 0.7619108897447586,
      "num_tokens": 233800554.0,
      "step": 900
    },
    {
      "entropy": 0.9694940079748631,
      "epoch": 0.4710257204834211,
      "grad_norm": 0.57421875,
      "learning_rate": 1.9113757884362315e-05,
      "loss": 0.9632,
      "mean_token_accuracy": 0.7623741792142391,
      "num_tokens": 246788996.0,
      "step": 950
    },
    {
      "entropy": 0.9737490539252758,
      "epoch": 0.4958165478772854,
      "grad_norm": 0.52734375,
      "learning_rate": 1.7851514790691512e-05,
      "loss": 0.9664,
      "mean_token_accuracy": 0.7615893495082855,
      "num_tokens": 259781274.0,
      "step": 1000
    },
    {
      "entropy": 0.9637996312975884,
      "epoch": 0.5206073752711496,
      "grad_norm": 0.54296875,
      "learning_rate": 1.6567926949014805e-05,
      "loss": 0.9574,
      "mean_token_accuracy": 0.763445483893156,
      "num_tokens": 272766654.0,
      "step": 1050
    },
    {
      "entropy": 0.9670505975186825,
      "epoch": 0.5453982026650139,
      "grad_norm": 0.5546875,
      "learning_rate": 1.5272602536401258e-05,
      "loss": 0.9603,
      "mean_token_accuracy": 0.7628799936175347,
      "num_tokens": 285761894.0,
      "step": 1100
    },
    {
      "entropy": 0.9692679969966411,
      "epoch": 0.5701890300588782,
      "grad_norm": 0.53515625,
      "learning_rate": 1.3975237582927023e-05,
      "loss": 0.9625,
      "mean_token_accuracy": 0.7623443056643009,
      "num_tokens": 298753069.0,
      "step": 1150
    },
    {
      "entropy": 0.9643890456855297,
      "epoch": 0.5949798574527425,
      "grad_norm": 0.5234375,
      "learning_rate": 1.2685543392946551e-05,
      "loss": 0.9578,
      "mean_token_accuracy": 0.7632720285654068,
      "num_tokens": 311750957.0,
      "step": 1200
    },
    {
      "entropy": 0.967204072624445,
      "epoch": 0.6197706848466068,
      "grad_norm": 0.55859375,
      "learning_rate": 1.1413173852029593e-05,
      "loss": 0.9602,
      "mean_token_accuracy": 0.7628644931316376,
      "num_tokens": 324739809.0,
      "step": 1250
    },
    {
      "entropy": 0.9641332650184631,
      "epoch": 0.644561512240471,
      "grad_norm": 0.53515625,
      "learning_rate": 1.0167653163701218e-05,
      "loss": 0.9577,
      "mean_token_accuracy": 0.7634398965537548,
      "num_tokens": 337733441.0,
      "step": 1300
    },
    {
      "entropy": 0.9630196557939052,
      "epoch": 0.6693523396343353,
      "grad_norm": 0.55078125,
      "learning_rate": 8.958304556904608e-06,
      "loss": 0.9561,
      "mean_token_accuracy": 0.7635413825511932,
      "num_tokens": 350729616.0,
      "step": 1350
    },
    {
      "entropy": 0.9601120933890342,
      "epoch": 0.6941431670281996,
      "grad_norm": 0.58203125,
      "learning_rate": 7.794180497840417e-06,
      "loss": 0.9534,
      "mean_token_accuracy": 0.7642950342595577,
      "num_tokens": 363726550.0,
      "step": 1400
    },
    {
      "entropy": 0.959176854044199,
      "epoch": 0.7189339944220638,
      "grad_norm": 0.53515625,
      "learning_rate": 6.683994928575307e-06,
      "loss": 0.9527,
      "mean_token_accuracy": 0.7643388402462006,
      "num_tokens": 376715458.0,
      "step": 1450
    },
    {
      "entropy": 0.9641504181921482,
      "epoch": 0.7437248218159281,
      "grad_norm": 0.54296875,
      "learning_rate": 5.636058039641382e-06,
      "loss": 0.9572,
      "mean_token_accuracy": 0.7632291333377361,
      "num_tokens": 389703743.0,
      "step": 1500
    },
    {
      "entropy": 0.9603779044747353,
      "epoch": 0.7685156492097924,
      "grad_norm": 0.5234375,
      "learning_rate": 4.6582140648800434e-06,
      "loss": 0.9531,
      "mean_token_accuracy": 0.7641150717437267,
      "num_tokens": 402693509.0,
      "step": 1550
    },
    {
      "entropy": 0.9703494548797608,
      "epoch": 0.7933064766036566,
      "grad_norm": 0.515625,
      "learning_rate": 3.7577825641611918e-06,
      "loss": 0.9645,
      "mean_token_accuracy": 0.7619730933010578,
      "num_tokens": 415676636.0,
      "step": 1600
    },
    {
      "entropy": 0.9646534560620785,
      "epoch": 0.8180973039975209,
      "grad_norm": 0.53515625,
      "learning_rate": 2.941503633500518e-06,
      "loss": 0.9582,
      "mean_token_accuracy": 0.7631721512973308,
      "num_tokens": 428672727.0,
      "step": 1650
    },
    {
      "entropy": 0.9584815502166748,
      "epoch": 0.8428881313913852,
      "grad_norm": 0.54296875,
      "learning_rate": 2.215487452699424e-06,
      "loss": 0.9516,
      "mean_token_accuracy": 0.7645354972779751,
      "num_tokens": 441658704.0,
      "step": 1700
    },
    {
      "entropy": 0.9654586097598076,
      "epoch": 0.8676789587852495,
      "grad_norm": 0.53515625,
      "learning_rate": 1.5851685481640143e-06,
      "loss": 0.9581,
      "mean_token_accuracy": 0.7633582444489002,
      "num_tokens": 454644190.0,
      "step": 1750
    },
    {
      "entropy": 0.9650468651950359,
      "epoch": 0.8924697861791138,
      "grad_norm": 0.5390625,
      "learning_rate": 1.0552651132645602e-06,
      "loss": 0.9583,
      "mean_token_accuracy": 0.7630480709671974,
      "num_tokens": 467637106.0,
      "step": 1800
    },
    {
      "entropy": 0.9669888707995414,
      "epoch": 0.917260613572978,
      "grad_norm": 0.53515625,
      "learning_rate": 6.297436907390885e-07,
      "loss": 0.9595,
      "mean_token_accuracy": 0.7629272870719432,
      "num_tokens": 480637699.0,
      "step": 1850
    },
    {
      "entropy": 0.9635004520416259,
      "epoch": 0.9420514409668422,
      "grad_norm": 0.53515625,
      "learning_rate": 3.117894815076755e-07,
      "loss": 0.9571,
      "mean_token_accuracy": 0.7636180396378041,
      "num_tokens": 493631966.0,
      "step": 1900
    },
    {
      "entropy": 0.9683159920573234,
      "epoch": 0.9668422683607065,
      "grad_norm": 0.52734375,
      "learning_rate": 1.0378250214803986e-07,
      "loss": 0.9612,
      "mean_token_accuracy": 0.7624971939623356,
      "num_tokens": 506610108.0,
      "step": 1950
    },
    {
      "entropy": 0.9708416217565536,
      "epoch": 0.9916330957545708,
      "grad_norm": 0.53515625,
      "learning_rate": 7.2797695034398924e-09,
      "loss": 0.9639,
      "mean_token_accuracy": 0.7619807639718056,
      "num_tokens": 519598174.0,
      "step": 2000
    }
  ],
  "logging_steps": 50,
  "max_steps": 2017,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4.431062590428283e+18,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}