
{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 30,
"global_step": 1092,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.018344416418252695,
"grad_norm": 2.4060168266296387,
"learning_rate": 3.272727272727273e-05,
"loss": 2.2041,
"step": 10
},
{
"epoch": 0.03668883283650539,
"grad_norm": 0.6585841774940491,
"learning_rate": 6.90909090909091e-05,
"loss": 0.6873,
"step": 20
},
{
"epoch": 0.05503324925475808,
"grad_norm": 0.5287330150604248,
"learning_rate": 0.00010545454545454545,
"loss": 0.4392,
"step": 30
},
{
"epoch": 0.05503324925475808,
"eval_loss": 0.40955930948257446,
"eval_runtime": 55.5305,
"eval_samples_per_second": 4.142,
"eval_steps_per_second": 4.142,
"step": 30
},
{
"epoch": 0.07337766567301078,
"grad_norm": 0.6942081451416016,
"learning_rate": 0.00014181818181818184,
"loss": 0.3259,
"step": 40
},
{
"epoch": 0.09172208209126347,
"grad_norm": 0.5697287917137146,
"learning_rate": 0.0001781818181818182,
"loss": 0.2795,
"step": 50
},
{
"epoch": 0.11006649850951616,
"grad_norm": 0.4920896887779236,
"learning_rate": 0.0001999926577882564,
"loss": 0.2213,
"step": 60
},
{
"epoch": 0.11006649850951616,
"eval_loss": 0.22321809828281403,
"eval_runtime": 54.872,
"eval_samples_per_second": 4.192,
"eval_steps_per_second": 4.192,
"step": 60
},
{
"epoch": 0.12841091492776885,
"grad_norm": 0.3659333884716034,
"learning_rate": 0.00019991007028765122,
"loss": 0.2043,
"step": 70
},
{
"epoch": 0.14675533134602156,
"grad_norm": 0.2682117223739624,
"learning_rate": 0.0001997357935664527,
"loss": 0.1995,
"step": 80
},
{
"epoch": 0.16509974776427425,
"grad_norm": 0.265464186668396,
"learning_rate": 0.0001994699875614589,
"loss": 0.1663,
"step": 90
},
{
"epoch": 0.16509974776427425,
"eval_loss": 0.18330270051956177,
"eval_runtime": 54.8391,
"eval_samples_per_second": 4.194,
"eval_steps_per_second": 4.194,
"step": 90
},
{
"epoch": 0.18344416418252693,
"grad_norm": 0.4005824029445648,
"learning_rate": 0.000199112896207494,
"loss": 0.1586,
"step": 100
},
{
"epoch": 0.20178858060077964,
"grad_norm": 0.2857477366924286,
"learning_rate": 0.00019866484721354499,
"loss": 0.1596,
"step": 110
},
{
"epoch": 0.22013299701903233,
"grad_norm": 0.18151400983333588,
"learning_rate": 0.00019812625176201745,
"loss": 0.1597,
"step": 120
},
{
"epoch": 0.22013299701903233,
"eval_loss": 0.15281935036182404,
"eval_runtime": 55.1288,
"eval_samples_per_second": 4.172,
"eval_steps_per_second": 4.172,
"step": 120
},
{
"epoch": 0.238477413437285,
"grad_norm": 0.2823415696620941,
"learning_rate": 0.00019749760413138626,
"loss": 0.16,
"step": 130
},
{
"epoch": 0.2568218298555377,
"grad_norm": 0.1662892997264862,
"learning_rate": 0.00019677948124258748,
"loss": 0.1453,
"step": 140
},
{
"epoch": 0.27516624627379044,
"grad_norm": 0.18668028712272644,
"learning_rate": 0.00019597254212956822,
"loss": 0.144,
"step": 150
},
{
"epoch": 0.27516624627379044,
"eval_loss": 0.14610502123832703,
"eval_runtime": 54.955,
"eval_samples_per_second": 4.185,
"eval_steps_per_second": 4.185,
"step": 150
},
{
"epoch": 0.2935106626920431,
"grad_norm": 0.19153951108455658,
"learning_rate": 0.0001950775273344792,
"loss": 0.1508,
"step": 160
},
{
"epoch": 0.3118550791102958,
"grad_norm": 0.5208008885383606,
"learning_rate": 0.00019409525822806662,
"loss": 0.1332,
"step": 170
},
{
"epoch": 0.3301994955285485,
"grad_norm": 0.20292238891124725,
"learning_rate": 0.00019302663625588563,
"loss": 0.1368,
"step": 180
},
{
"epoch": 0.3301994955285485,
"eval_loss": 0.1378733068704605,
"eval_runtime": 55.012,
"eval_samples_per_second": 4.181,
"eval_steps_per_second": 4.181,
"step": 180
},
{
"epoch": 0.3485439119468012,
"grad_norm": 0.15841814875602722,
"learning_rate": 0.0001918726421110282,
"loss": 0.1376,
"step": 190
},
{
"epoch": 0.36688832836505386,
"grad_norm": 0.1384090781211853,
"learning_rate": 0.00019063433483412347,
"loss": 0.1382,
"step": 200
},
{
"epoch": 0.3852327447833066,
"grad_norm": 0.14293262362480164,
"learning_rate": 0.00018931285084143818,
"loss": 0.1328,
"step": 210
},
{
"epoch": 0.3852327447833066,
"eval_loss": 0.134722039103508,
"eval_runtime": 55.0139,
"eval_samples_per_second": 4.181,
"eval_steps_per_second": 4.181,
"step": 210
},
{
"epoch": 0.4035771612015593,
"grad_norm": 0.2324746549129486,
"learning_rate": 0.00018790940288196715,
"loss": 0.135,
"step": 220
},
{
"epoch": 0.42192157761981197,
"grad_norm": 0.1578933596611023,
"learning_rate": 0.00018642527892447243,
"loss": 0.1253,
"step": 230
},
{
"epoch": 0.44026599403806466,
"grad_norm": 0.20755188167095184,
"learning_rate": 0.00018486184097549186,
"loss": 0.1399,
"step": 240
},
{
"epoch": 0.44026599403806466,
"eval_loss": 0.1301199346780777,
"eval_runtime": 55.1504,
"eval_samples_per_second": 4.17,
"eval_steps_per_second": 4.17,
"step": 240
},
{
"epoch": 0.45861041045631734,
"grad_norm": 0.1697942614555359,
"learning_rate": 0.0001832205238294018,
"loss": 0.1229,
"step": 250
},
{
"epoch": 0.47695482687457,
"grad_norm": 0.10918751358985901,
"learning_rate": 0.00018150283375168114,
"loss": 0.1243,
"step": 260
},
{
"epoch": 0.49529924329282277,
"grad_norm": 0.4525628089904785,
"learning_rate": 0.0001797103470965852,
"loss": 0.1351,
"step": 270
},
{
"epoch": 0.49529924329282277,
"eval_loss": 0.12848101556301117,
"eval_runtime": 55.622,
"eval_samples_per_second": 4.135,
"eval_steps_per_second": 4.135,
"step": 270
},
{
"epoch": 0.5136436597110754,
"grad_norm": 0.17496690154075623,
"learning_rate": 0.00017784470886049783,
"loss": 0.1329,
"step": 280
},
{
"epoch": 0.5319880761293282,
"grad_norm": 0.14707504212856293,
"learning_rate": 0.00017590763117228934,
"loss": 0.1317,
"step": 290
},
{
"epoch": 0.5503324925475809,
"grad_norm": 0.15233491361141205,
"learning_rate": 0.00017390089172206592,
"loss": 0.1353,
"step": 300
},
{
"epoch": 0.5503324925475809,
"eval_loss": 0.12714248895645142,
"eval_runtime": 55.4821,
"eval_samples_per_second": 4.145,
"eval_steps_per_second": 4.145,
"step": 300
},
{
"epoch": 0.5686769089658336,
"grad_norm": 0.20287233591079712,
"learning_rate": 0.0001718263321297523,
"loss": 0.1273,
"step": 310
},
{
"epoch": 0.5870213253840862,
"grad_norm": 0.2743270993232727,
"learning_rate": 0.00016968585625500498,
"loss": 0.1373,
"step": 320
},
{
"epoch": 0.6053657418023389,
"grad_norm": 0.40428221225738525,
"learning_rate": 0.0001674814284500068,
"loss": 0.1292,
"step": 330
},
{
"epoch": 0.6053657418023389,
"eval_loss": 0.1255505383014679,
"eval_runtime": 55.4814,
"eval_samples_per_second": 4.146,
"eval_steps_per_second": 4.146,
"step": 330
},
{
"epoch": 0.6237101582205916,
"grad_norm": 0.3385097086429596,
"learning_rate": 0.00016521507175674643,
"loss": 0.1399,
"step": 340
},
{
"epoch": 0.6420545746388443,
"grad_norm": 0.2672514319419861,
"learning_rate": 0.00016288886605043764,
"loss": 0.1345,
"step": 350
},
{
"epoch": 0.660398991057097,
"grad_norm": 0.16421453654766083,
"learning_rate": 0.0001605049461307812,
"loss": 0.1278,
"step": 360
},
{
"epoch": 0.660398991057097,
"eval_loss": 0.1262081414461136,
"eval_runtime": 55.639,
"eval_samples_per_second": 4.134,
"eval_steps_per_second": 4.134,
"step": 360
},
{
"epoch": 0.6787434074753497,
"grad_norm": 0.18313640356063843,
"learning_rate": 0.00015806549976282182,
"loss": 0.1269,
"step": 370
},
{
"epoch": 0.6970878238936024,
"grad_norm": 0.2421526312828064,
"learning_rate": 0.00015557276566919784,
"loss": 0.1352,
"step": 380
},
{
"epoch": 0.715432240311855,
"grad_norm": 0.11791064590215683,
"learning_rate": 0.0001530290314756265,
"loss": 0.1206,
"step": 390
},
{
"epoch": 0.715432240311855,
"eval_loss": 0.12255965173244476,
"eval_runtime": 55.4524,
"eval_samples_per_second": 4.148,
"eval_steps_per_second": 4.148,
"step": 390
},
{
"epoch": 0.7337766567301077,
"grad_norm": 0.10551954060792923,
"learning_rate": 0.00015043663161150937,
"loss": 0.117,
"step": 400
},
{
"epoch": 0.7521210731483605,
"grad_norm": 0.11994520574808121,
"learning_rate": 0.0001477979451675861,
"loss": 0.1266,
"step": 410
},
{
"epoch": 0.7704654895666132,
"grad_norm": 0.11859820783138275,
"learning_rate": 0.00014511539371260074,
"loss": 0.1313,
"step": 420
},
{
"epoch": 0.7704654895666132,
"eval_loss": 0.12076492607593536,
"eval_runtime": 55.5451,
"eval_samples_per_second": 4.141,
"eval_steps_per_second": 4.141,
"step": 420
},
{
"epoch": 0.7888099059848659,
"grad_norm": 0.09068579971790314,
"learning_rate": 0.0001423914390709861,
"loss": 0.1272,
"step": 430
},
{
"epoch": 0.8071543224031186,
"grad_norm": 0.13292035460472107,
"learning_rate": 0.00013962858106360398,
"loss": 0.1346,
"step": 440
},
{
"epoch": 0.8254987388213713,
"grad_norm": 0.2738932967185974,
"learning_rate": 0.00013682935521361627,
"loss": 0.1221,
"step": 450
},
{
"epoch": 0.8254987388213713,
"eval_loss": 0.11899405717849731,
"eval_runtime": 55.8291,
"eval_samples_per_second": 4.12,
"eval_steps_per_second": 4.12,
"step": 450
},
{
"epoch": 0.8438431552396239,
"grad_norm": 0.09868068993091583,
"learning_rate": 0.00013399633041959047,
"loss": 0.1215,
"step": 460
},
{
"epoch": 0.8621875716578766,
"grad_norm": 0.08525373786687851,
"learning_rate": 0.00013113210659797687,
"loss": 0.123,
"step": 470
},
{
"epoch": 0.8805319880761293,
"grad_norm": 0.08514482527971268,
"learning_rate": 0.00012823931229711944,
"loss": 0.1301,
"step": 480
},
{
"epoch": 0.8805319880761293,
"eval_loss": 0.11885283887386322,
"eval_runtime": 55.4431,
"eval_samples_per_second": 4.148,
"eval_steps_per_second": 4.148,
"step": 480
},
{
"epoch": 0.898876404494382,
"grad_norm": 0.09567002952098846,
"learning_rate": 0.00012532060228499136,
"loss": 0.1202,
"step": 490
},
{
"epoch": 0.9172208209126347,
"grad_norm": 0.11083484441041946,
"learning_rate": 0.00012237865511286746,
"loss": 0.1189,
"step": 500
},
{
"epoch": 0.9355652373308874,
"grad_norm": 0.10928696393966675,
"learning_rate": 0.00011941617065717124,
"loss": 0.127,
"step": 510
},
{
"epoch": 0.9355652373308874,
"eval_loss": 0.11898388713598251,
"eval_runtime": 55.5894,
"eval_samples_per_second": 4.137,
"eval_steps_per_second": 4.137,
"step": 510
},
{
"epoch": 0.95390965374914,
"grad_norm": 0.10917045921087265,
"learning_rate": 0.00011643586764175092,
"loss": 0.1203,
"step": 520
},
{
"epoch": 0.9722540701673928,
"grad_norm": 0.11703202873468399,
"learning_rate": 0.00011344048114285882,
"loss": 0.1265,
"step": 530
},
{
"epoch": 0.9905984865856455,
"grad_norm": 0.08545742928981781,
"learning_rate": 0.00011043276007912413,
"loss": 0.1194,
"step": 540
},
{
"epoch": 0.9905984865856455,
"eval_loss": 0.11826686561107635,
"eval_runtime": 55.4536,
"eval_samples_per_second": 4.148,
"eval_steps_per_second": 4.148,
"step": 540
},
{
"epoch": 1.0073377665673011,
"grad_norm": 0.14749501645565033,
"learning_rate": 0.00010741546468882223,
"loss": 0.1094,
"step": 550
},
{
"epoch": 1.0256821829855538,
"grad_norm": 0.08938182145357132,
"learning_rate": 0.00010439136399675542,
"loss": 0.1123,
"step": 560
},
{
"epoch": 1.0440265994038065,
"grad_norm": 0.16764287650585175,
"learning_rate": 0.00010136323327307075,
"loss": 0.1301,
"step": 570
},
{
"epoch": 1.0440265994038065,
"eval_loss": 0.11838380247354507,
"eval_runtime": 55.6355,
"eval_samples_per_second": 4.134,
"eval_steps_per_second": 4.134,
"step": 570
},
{
"epoch": 1.0623710158220592,
"grad_norm": 0.07769570499658585,
"learning_rate": 9.833385148634574e-05,
"loss": 0.1194,
"step": 580
},
{
"epoch": 1.0807154322403119,
"grad_norm": 0.08358050137758255,
"learning_rate": 9.53059987532804e-05,
"loss": 0.1187,
"step": 590
},
{
"epoch": 1.0990598486585645,
"grad_norm": 0.10176610946655273,
"learning_rate": 9.228245378733537e-05,
"loss": 0.1087,
"step": 600
},
{
"epoch": 1.0990598486585645,
"eval_loss": 0.11805912852287292,
"eval_runtime": 55.6508,
"eval_samples_per_second": 4.133,
"eval_steps_per_second": 4.133,
"step": 600
},
{
"epoch": 1.1174042650768172,
"grad_norm": 0.09811729192733765,
"learning_rate": 8.926599134865808e-05,
"loss": 0.1267,
"step": 610
},
{
"epoch": 1.13574868149507,
"grad_norm": 0.08882371336221695,
"learning_rate": 8.625937969763662e-05,
"loss": 0.1291,
"step": 620
},
{
"epoch": 1.1540930979133226,
"grad_norm": 0.07570777833461761,
"learning_rate": 8.326537805441884e-05,
"loss": 0.1182,
"step": 630
},
{
"epoch": 1.1540930979133226,
"eval_loss": 0.11645928770303726,
"eval_runtime": 55.7654,
"eval_samples_per_second": 4.124,
"eval_steps_per_second": 4.124,
"step": 630
},
{
"epoch": 1.1724375143315753,
"grad_norm": 0.06548488140106201,
"learning_rate": 8.028673406672763e-05,
"loss": 0.1148,
"step": 640
},
{
"epoch": 1.190781930749828,
"grad_norm": 0.07197605818510056,
"learning_rate": 7.732618128829656e-05,
"loss": 0.1204,
"step": 650
},
{
"epoch": 1.2091263471680807,
"grad_norm": 0.0930318832397461,
"learning_rate": 7.438643667023979e-05,
"loss": 0.1157,
"step": 660
},
{
"epoch": 1.2091263471680807,
"eval_loss": 0.11639692634344101,
"eval_runtime": 55.6937,
"eval_samples_per_second": 4.13,
"eval_steps_per_second": 4.13,
"step": 660
},
{
"epoch": 1.2274707635863333,
"grad_norm": 0.07502172142267227,
"learning_rate": 7.147019806765836e-05,
"loss": 0.1194,
"step": 670
},
{
"epoch": 1.245815180004586,
"grad_norm": 0.06672611832618713,
"learning_rate": 6.858014176377139e-05,
"loss": 0.119,
"step": 680
},
{
"epoch": 1.264159596422839,
"grad_norm": 0.07496988028287888,
"learning_rate": 6.57189200138442e-05,
"loss": 0.1162,
"step": 690
},
{
"epoch": 1.264159596422839,
"eval_loss": 0.1162952408194542,
"eval_runtime": 55.495,
"eval_samples_per_second": 4.145,
"eval_steps_per_second": 4.145,
"step": 690
},
{
"epoch": 1.2825040128410916,
"grad_norm": 0.08055031299591064,
"learning_rate": 6.288915861116706e-05,
"loss": 0.1193,
"step": 700
},
{
"epoch": 1.3008484292593443,
"grad_norm": 0.0835222527384758,
"learning_rate": 6.009345447731886e-05,
"loss": 0.1166,
"step": 710
},
{
"epoch": 1.319192845677597,
"grad_norm": 0.16637521982192993,
"learning_rate": 5.733437327892661e-05,
"loss": 0.1205,
"step": 720
},
{
"epoch": 1.319192845677597,
"eval_loss": 0.11508560180664062,
"eval_runtime": 55.8161,
"eval_samples_per_second": 4.121,
"eval_steps_per_second": 4.121,
"step": 720
},
{
"epoch": 1.3375372620958497,
"grad_norm": 0.07946062088012695,
"learning_rate": 5.4614447073108375e-05,
"loss": 0.1143,
"step": 730
},
{
"epoch": 1.3558816785141024,
"grad_norm": 0.09776254743337631,
"learning_rate": 5.193617198376004e-05,
"loss": 0.1214,
"step": 740
},
{
"epoch": 1.374226094932355,
"grad_norm": 0.13098250329494476,
"learning_rate": 4.930200591081865e-05,
"loss": 0.1159,
"step": 750
},
{
"epoch": 1.374226094932355,
"eval_loss": 0.1152450293302536,
"eval_runtime": 55.5887,
"eval_samples_per_second": 4.138,
"eval_steps_per_second": 4.138,
"step": 750
},
{
"epoch": 1.3925705113506077,
"grad_norm": 0.07567308843135834,
"learning_rate": 4.671436627460479e-05,
"loss": 0.1178,
"step": 760
},
{
"epoch": 1.4109149277688604,
"grad_norm": 0.09156125038862228,
"learning_rate": 4.417562779731355e-05,
"loss": 0.1157,
"step": 770
},
{
"epoch": 1.429259344187113,
"grad_norm": 0.09289383143186569,
"learning_rate": 4.168812032369026e-05,
"loss": 0.12,
"step": 780
},
{
"epoch": 1.429259344187113,
"eval_loss": 0.11563212424516678,
"eval_runtime": 55.559,
"eval_samples_per_second": 4.14,
"eval_steps_per_second": 4.14,
"step": 780
},
{
"epoch": 1.4476037606053658,
"grad_norm": 0.07849477976560593,
"learning_rate": 3.9254126682891425e-05,
"loss": 0.1205,
"step": 790
},
{
"epoch": 1.4659481770236185,
"grad_norm": 0.09009351581335068,
"learning_rate": 3.68758805934923e-05,
"loss": 0.1188,
"step": 800
},
{
"epoch": 1.4842925934418711,
"grad_norm": 0.0810457393527031,
"learning_rate": 3.455556461356413e-05,
"loss": 0.1199,
"step": 810
},
{
"epoch": 1.4842925934418711,
"eval_loss": 0.11508457362651825,
"eval_runtime": 55.6047,
"eval_samples_per_second": 4.136,
"eval_steps_per_second": 4.136,
"step": 810
},
{
"epoch": 1.5026370098601238,
"grad_norm": 0.07768921554088593,
"learning_rate": 3.229530813770281e-05,
"loss": 0.1109,
"step": 820
},
{
"epoch": 1.5209814262783765,
"grad_norm": 0.08873719722032547,
"learning_rate": 3.0097185442845653e-05,
"loss": 0.1141,
"step": 830
},
{
"epoch": 1.5393258426966292,
"grad_norm": 0.12609460949897766,
"learning_rate": 2.796321378467146e-05,
"loss": 0.1244,
"step": 840
},
{
"epoch": 1.5393258426966292,
"eval_loss": 0.11498970538377762,
"eval_runtime": 55.7795,
"eval_samples_per_second": 4.123,
"eval_steps_per_second": 4.123,
"step": 840
},
{
"epoch": 1.5576702591148819,
"grad_norm": 0.07744992524385452,
"learning_rate": 2.5895351546329717e-05,
"loss": 0.1121,
"step": 850
},
{
"epoch": 1.5760146755331346,
"grad_norm": 0.084147609770298,
"learning_rate": 2.3895496441197806e-05,
"loss": 0.1177,
"step": 860
},
{
"epoch": 1.5943590919513873,
"grad_norm": 0.08521833270788193,
"learning_rate": 2.1965483771316498e-05,
"loss": 0.1223,
"step": 870
},
{
"epoch": 1.5943590919513873,
"eval_loss": 0.11485826224088669,
"eval_runtime": 55.6756,
"eval_samples_per_second": 4.131,
"eval_steps_per_second": 4.131,
"step": 870
},
{
"epoch": 1.61270350836964,
"grad_norm": 0.08053518086671829,
"learning_rate": 2.0107084743101024e-05,
"loss": 0.1114,
"step": 880
},
{
"epoch": 1.6310479247878926,
"grad_norm": 0.07093213498592377,
"learning_rate": 1.8322004841873842e-05,
"loss": 0.1213,
"step": 890
},
{
"epoch": 1.6493923412061453,
"grad_norm": 0.08286295086145401,
"learning_rate": 1.661188226671111e-05,
"loss": 0.1124,
"step": 900
},
{
"epoch": 1.6493923412061453,
"eval_loss": 0.1149599477648735,
"eval_runtime": 55.8031,
"eval_samples_per_second": 4.122,
"eval_steps_per_second": 4.122,
"step": 900
},
{
"epoch": 1.667736757624398,
"grad_norm": 0.07299927622079849,
"learning_rate": 1.4978286427038601e-05,
"loss": 0.1123,
"step": 910
},
{
"epoch": 1.6860811740426507,
"grad_norm": 0.07889826595783234,
"learning_rate": 1.3422716502357102e-05,
"loss": 0.1135,
"step": 920
},
{
"epoch": 1.7044255904609034,
"grad_norm": 0.08562670648097992,
"learning_rate": 1.1946600066419345e-05,
"loss": 0.1193,
"step": 930
},
{
"epoch": 1.7044255904609034,
"eval_loss": 0.11482664942741394,
"eval_runtime": 56.1319,
"eval_samples_per_second": 4.097,
"eval_steps_per_second": 4.097,
"step": 930
},
{
"epoch": 1.722770006879156,
"grad_norm": 0.09471631050109863,
"learning_rate": 1.0551291777120464e-05,
"loss": 0.1173,
"step": 940
},
{
"epoch": 1.7411144232974087,
"grad_norm": 0.07673942297697067,
"learning_rate": 9.238072133304653e-06,
"loss": 0.1121,
"step": 950
},
{
"epoch": 1.7594588397156614,
"grad_norm": 0.10446635633707047,
"learning_rate": 8.00814629962916e-06,
"loss": 0.1212,
"step": 960
},
{
"epoch": 1.7594588397156614,
"eval_loss": 0.11442519724369049,
"eval_runtime": 55.8226,
"eval_samples_per_second": 4.12,
"eval_steps_per_second": 4.12,
"step": 960
},
{
"epoch": 1.777803256133914,
"grad_norm": 0.08229784667491913,
"learning_rate": 6.862643000563407e-06,
"loss": 0.1186,
"step": 970
},
{
"epoch": 1.7961476725521668,
"grad_norm": 0.08047077804803848,
"learning_rate": 5.802613484538888e-06,
"loss": 0.112,
"step": 980
},
{
"epoch": 1.8144920889704195,
"grad_norm": 0.06683830171823502,
"learning_rate": 4.829030559200032e-06,
"loss": 0.1208,
"step": 990
},
{
"epoch": 1.8144920889704195,
"eval_loss": 0.11440839618444443,
"eval_runtime": 55.775,
"eval_samples_per_second": 4.124,
"eval_steps_per_second": 4.124,
"step": 990
},
{
"epoch": 1.8328365053886724,
"grad_norm": 0.07382703572511673,
"learning_rate": 3.942787698641548e-06,
"loss": 0.1272,
"step": 1000
},
{
"epoch": 1.851180921806925,
"grad_norm": 0.07742121815681458,
"learning_rate": 3.1446982234517474e-06,
"loss": 0.1249,
"step": 1010
},
{
"epoch": 1.8695253382251777,
"grad_norm": 0.07977940142154694,
"learning_rate": 2.4354945543138775e-06,
"loss": 0.1149,
"step": 1020
},
{
"epoch": 1.8695253382251777,
"eval_loss": 0.11421651393175125,
"eval_runtime": 55.7914,
"eval_samples_per_second": 4.123,
"eval_steps_per_second": 4.123,
"step": 1020
},
{
"epoch": 1.8878697546434304,
"grad_norm": 0.0936596468091011,
"learning_rate": 1.8158275398508784e-06,
"loss": 0.1146,
"step": 1030
},
{
"epoch": 1.9062141710616831,
"grad_norm": 0.09341125935316086,
"learning_rate": 1.2862658593302046e-06,
"loss": 0.1122,
"step": 1040
},
{
"epoch": 1.9245585874799358,
"grad_norm": 0.07110217958688736,
"learning_rate": 8.472955007769456e-07,
"loss": 0.1131,
"step": 1050
},
{
"epoch": 1.9245585874799358,
"eval_loss": 0.11426779627799988,
"eval_runtime": 55.7778,
"eval_samples_per_second": 4.124,
"eval_steps_per_second": 4.124,
"step": 1050
},
{
"epoch": 1.9429030038981885,
"grad_norm": 0.07995007932186127,
"learning_rate": 4.993193149740338e-07,
"loss": 0.1093,
"step": 1060
},
{
"epoch": 1.9612474203164412,
"grad_norm": 0.08582771569490433,
"learning_rate": 2.426566457590651e-07,
"loss": 0.1198,
"step": 1070
},
{
"epoch": 1.9795918367346939,
"grad_norm": 0.08391684293746948,
"learning_rate": 7.754303695688414e-08,
"loss": 0.115,
"step": 1080
},
{
"epoch": 1.9795918367346939,
"eval_loss": 0.1143002063035965,
"eval_runtime": 55.6892,
"eval_samples_per_second": 4.13,
"eval_steps_per_second": 4.13,
"step": 1080
},
{
"epoch": 1.9979362531529465,
"grad_norm": 0.07199736684560776,
"learning_rate": 4.130016216896682e-09,
"loss": 0.1078,
"step": 1090
}
],
"logging_steps": 10,
"max_steps": 1092,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.7977701988429824e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}