{
  "best_global_step": 416,
  "best_metric": 0.10842076689004898,
  "best_model_checkpoint": "saves_bts_preliminary/base/llama-3.2-1b-instruct/train_mrpc_42_1776331557/checkpoint-416",
  "epoch": 5.0,
  "eval_steps": 104,
  "global_step": 2065,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.012106537530266344,
      "grad_norm": 320.0541076660156,
      "learning_rate": 9.661835748792271e-08,
      "loss": 0.81,
      "num_input_tokens_seen": 4352,
      "step": 5
    },
    {
      "epoch": 0.024213075060532687,
      "grad_norm": 293.6126403808594,
      "learning_rate": 2.173913043478261e-07,
      "loss": 0.8,
      "num_input_tokens_seen": 8768,
      "step": 10
    },
|
||
|
|
{
|
||
|
|
"epoch": 0.03631961259079903,
|
||
|
|
"grad_norm": 176.21604919433594,
|
||
|
|
"learning_rate": 3.3816425120772945e-07,
|
||
|
|
"loss": 0.6335,
|
||
|
|
"num_input_tokens_seen": 12992,
|
||
|
|
"step": 15
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.048426150121065374,
|
||
|
|
"grad_norm": 67.18666076660156,
|
||
|
|
"learning_rate": 4.5893719806763294e-07,
|
||
|
|
"loss": 0.3717,
|
||
|
|
"num_input_tokens_seen": 17344,
|
||
|
|
"step": 20
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.06053268765133172,
|
||
|
|
"grad_norm": 41.770694732666016,
|
||
|
|
"learning_rate": 5.797101449275363e-07,
|
||
|
|
"loss": 0.2438,
|
||
|
|
"num_input_tokens_seen": 21696,
|
||
|
|
"step": 25
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.07263922518159806,
|
||
|
|
"grad_norm": 54.93859100341797,
|
||
|
|
"learning_rate": 7.004830917874397e-07,
|
||
|
|
"loss": 0.2523,
|
||
|
|
"num_input_tokens_seen": 26112,
|
||
|
|
"step": 30
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.0847457627118644,
|
||
|
|
"grad_norm": 14.744933128356934,
|
||
|
|
"learning_rate": 8.212560386473431e-07,
|
||
|
|
"loss": 0.2223,
|
||
|
|
"num_input_tokens_seen": 30208,
|
||
|
|
"step": 35
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.09685230024213075,
|
||
|
|
"grad_norm": 6.022367477416992,
|
||
|
|
"learning_rate": 9.420289855072465e-07,
|
||
|
|
"loss": 0.2184,
|
||
|
|
"num_input_tokens_seen": 34688,
|
||
|
|
"step": 40
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.1089588377723971,
|
||
|
|
"grad_norm": 39.68272399902344,
|
||
|
|
"learning_rate": 1.0628019323671499e-06,
|
||
|
|
"loss": 0.2163,
|
||
|
|
"num_input_tokens_seen": 38784,
|
||
|
|
"step": 45
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.12106537530266344,
|
||
|
|
"grad_norm": 9.175082206726074,
|
||
|
|
"learning_rate": 1.1835748792270531e-06,
|
||
|
|
"loss": 0.2198,
|
||
|
|
"num_input_tokens_seen": 43200,
|
||
|
|
"step": 50
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.13317191283292978,
|
||
|
|
"grad_norm": 42.212303161621094,
|
||
|
|
"learning_rate": 1.3043478260869566e-06,
|
||
|
|
"loss": 0.2224,
|
||
|
|
"num_input_tokens_seen": 47296,
|
||
|
|
"step": 55
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.14527845036319612,
|
||
|
|
"grad_norm": 5.82745885848999,
|
||
|
|
"learning_rate": 1.42512077294686e-06,
|
||
|
|
"loss": 0.2272,
|
||
|
|
"num_input_tokens_seen": 51712,
|
||
|
|
"step": 60
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.15738498789346247,
|
||
|
|
"grad_norm": 18.959779739379883,
|
||
|
|
"learning_rate": 1.5458937198067634e-06,
|
||
|
|
"loss": 0.1665,
|
||
|
|
"num_input_tokens_seen": 55872,
|
||
|
|
"step": 65
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.1694915254237288,
|
||
|
|
"grad_norm": 54.883968353271484,
|
||
|
|
"learning_rate": 1.6666666666666667e-06,
|
||
|
|
"loss": 0.1835,
|
||
|
|
"num_input_tokens_seen": 59840,
|
||
|
|
"step": 70
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.18159806295399517,
|
||
|
|
"grad_norm": 87.76690673828125,
|
||
|
|
"learning_rate": 1.7874396135265702e-06,
|
||
|
|
"loss": 0.1898,
|
||
|
|
"num_input_tokens_seen": 64000,
|
||
|
|
"step": 75
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.1937046004842615,
|
||
|
|
"grad_norm": 22.004396438598633,
|
||
|
|
"learning_rate": 1.9082125603864736e-06,
|
||
|
|
"loss": 0.2149,
|
||
|
|
"num_input_tokens_seen": 68352,
|
||
|
|
"step": 80
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.20581113801452786,
|
||
|
|
"grad_norm": 13.515002250671387,
|
||
|
|
"learning_rate": 2.028985507246377e-06,
|
||
|
|
"loss": 0.1519,
|
||
|
|
"num_input_tokens_seen": 72768,
|
||
|
|
"step": 85
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.2179176755447942,
|
||
|
|
"grad_norm": 22.01325798034668,
|
||
|
|
"learning_rate": 2.1497584541062806e-06,
|
||
|
|
"loss": 0.1468,
|
||
|
|
"num_input_tokens_seen": 77120,
|
||
|
|
"step": 90
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.23002421307506055,
|
||
|
|
"grad_norm": 24.60677146911621,
|
||
|
|
"learning_rate": 2.270531400966184e-06,
|
||
|
|
"loss": 0.2277,
|
||
|
|
"num_input_tokens_seen": 81664,
|
||
|
|
"step": 95
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.24213075060532688,
|
||
|
|
"grad_norm": 19.255220413208008,
|
||
|
|
"learning_rate": 2.391304347826087e-06,
|
||
|
|
"loss": 0.1552,
|
||
|
|
"num_input_tokens_seen": 86080,
|
||
|
|
"step": 100
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.25181598062953997,
|
||
|
|
"eval_loss": 0.1484687179327011,
|
||
|
|
"eval_runtime": 0.6174,
|
||
|
|
"eval_samples_per_second": 594.395,
|
||
|
|
"eval_steps_per_second": 74.502,
|
||
|
|
"num_input_tokens_seen": 89600,
|
||
|
|
"step": 104
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.2542372881355932,
|
||
|
|
"grad_norm": 51.306358337402344,
|
||
|
|
"learning_rate": 2.5120772946859904e-06,
|
||
|
|
"loss": 0.1673,
|
||
|
|
"num_input_tokens_seen": 90432,
|
||
|
|
"step": 105
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.26634382566585957,
|
||
|
|
"grad_norm": 20.150880813598633,
|
||
|
|
"learning_rate": 2.632850241545894e-06,
|
||
|
|
"loss": 0.1694,
|
||
|
|
"num_input_tokens_seen": 94528,
|
||
|
|
"step": 110
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.2784503631961259,
|
||
|
|
"grad_norm": 16.875028610229492,
|
||
|
|
"learning_rate": 2.7536231884057974e-06,
|
||
|
|
"loss": 0.1627,
|
||
|
|
"num_input_tokens_seen": 98816,
|
||
|
|
"step": 115
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.29055690072639223,
|
||
|
|
"grad_norm": 17.16132164001465,
|
||
|
|
"learning_rate": 2.8743961352657007e-06,
|
||
|
|
"loss": 0.2205,
|
||
|
|
"num_input_tokens_seen": 103104,
|
||
|
|
"step": 120
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.3026634382566586,
|
||
|
|
"grad_norm": 19.57474708557129,
|
||
|
|
"learning_rate": 2.995169082125604e-06,
|
||
|
|
"loss": 0.1841,
|
||
|
|
"num_input_tokens_seen": 107328,
|
||
|
|
"step": 125
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.31476997578692495,
|
||
|
|
"grad_norm": 17.67765998840332,
|
||
|
|
"learning_rate": 3.1159420289855073e-06,
|
||
|
|
"loss": 0.1779,
|
||
|
|
"num_input_tokens_seen": 111488,
|
||
|
|
"step": 130
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.3268765133171913,
|
||
|
|
"grad_norm": 21.58500099182129,
|
||
|
|
"learning_rate": 3.236714975845411e-06,
|
||
|
|
"loss": 0.158,
|
||
|
|
"num_input_tokens_seen": 115968,
|
||
|
|
"step": 135
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.3389830508474576,
|
||
|
|
"grad_norm": 41.02679443359375,
|
||
|
|
"learning_rate": 3.3574879227053142e-06,
|
||
|
|
"loss": 0.2089,
|
||
|
|
"num_input_tokens_seen": 120192,
|
||
|
|
"step": 140
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.35108958837772397,
|
||
|
|
"grad_norm": 13.272953987121582,
|
||
|
|
"learning_rate": 3.4782608695652175e-06,
|
||
|
|
"loss": 0.1007,
|
||
|
|
"num_input_tokens_seen": 124416,
|
||
|
|
"step": 145
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.36319612590799033,
|
||
|
|
"grad_norm": 58.32763671875,
|
||
|
|
"learning_rate": 3.5990338164251208e-06,
|
||
|
|
"loss": 0.2718,
|
||
|
|
"num_input_tokens_seen": 128832,
|
||
|
|
"step": 150
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.37530266343825663,
|
||
|
|
"grad_norm": 46.48350524902344,
|
||
|
|
"learning_rate": 3.7198067632850245e-06,
|
||
|
|
"loss": 0.3704,
|
||
|
|
"num_input_tokens_seen": 132992,
|
||
|
|
"step": 155
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.387409200968523,
|
||
|
|
"grad_norm": 13.923331260681152,
|
||
|
|
"learning_rate": 3.840579710144928e-06,
|
||
|
|
"loss": 0.1945,
|
||
|
|
"num_input_tokens_seen": 137280,
|
||
|
|
"step": 160
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.39951573849878935,
|
||
|
|
"grad_norm": 76.48035430908203,
|
||
|
|
"learning_rate": 3.961352657004831e-06,
|
||
|
|
"loss": 0.253,
|
||
|
|
"num_input_tokens_seen": 141568,
|
||
|
|
"step": 165
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.4116222760290557,
|
||
|
|
"grad_norm": 8.948328971862793,
|
||
|
|
"learning_rate": 4.082125603864734e-06,
|
||
|
|
"loss": 0.1622,
|
||
|
|
"num_input_tokens_seen": 145984,
|
||
|
|
"step": 170
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.423728813559322,
|
||
|
|
"grad_norm": 44.44621276855469,
|
||
|
|
"learning_rate": 4.202898550724638e-06,
|
||
|
|
"loss": 0.1974,
|
||
|
|
"num_input_tokens_seen": 150144,
|
||
|
|
"step": 175
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.4358353510895884,
|
||
|
|
"grad_norm": 12.341259956359863,
|
||
|
|
"learning_rate": 4.323671497584541e-06,
|
||
|
|
"loss": 0.2191,
|
||
|
|
"num_input_tokens_seen": 154624,
|
||
|
|
"step": 180
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.44794188861985473,
|
||
|
|
"grad_norm": 13.725370407104492,
|
||
|
|
"learning_rate": 4.444444444444444e-06,
|
||
|
|
"loss": 0.2192,
|
||
|
|
"num_input_tokens_seen": 158784,
|
||
|
|
"step": 185
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.4600484261501211,
|
||
|
|
"grad_norm": 9.8717622756958,
|
||
|
|
"learning_rate": 4.565217391304348e-06,
|
||
|
|
"loss": 0.1887,
|
||
|
|
"num_input_tokens_seen": 163072,
|
||
|
|
"step": 190
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.4721549636803874,
|
||
|
|
"grad_norm": 15.11948299407959,
|
||
|
|
"learning_rate": 4.6859903381642516e-06,
|
||
|
|
"loss": 0.1951,
|
||
|
|
"num_input_tokens_seen": 167104,
|
||
|
|
"step": 195
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.48426150121065376,
|
||
|
|
"grad_norm": 10.298319816589355,
|
||
|
|
"learning_rate": 4.806763285024155e-06,
|
||
|
|
"loss": 0.1486,
|
||
|
|
"num_input_tokens_seen": 171456,
|
||
|
|
"step": 200
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.4963680387409201,
|
||
|
|
"grad_norm": 22.512561798095703,
|
||
|
|
"learning_rate": 4.927536231884059e-06,
|
||
|
|
"loss": 0.2178,
|
||
|
|
"num_input_tokens_seen": 175808,
|
||
|
|
"step": 205
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.5036319612590799,
|
||
|
|
"eval_loss": 0.1319892704486847,
|
||
|
|
"eval_runtime": 0.6163,
|
||
|
|
"eval_samples_per_second": 595.512,
|
||
|
|
"eval_steps_per_second": 74.642,
|
||
|
|
"num_input_tokens_seen": 178688,
|
||
|
|
"step": 208
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.5084745762711864,
|
||
|
|
"grad_norm": 11.861852645874023,
|
||
|
|
"learning_rate": 4.999985705205496e-06,
|
||
|
|
"loss": 0.1052,
|
||
|
|
"num_input_tokens_seen": 180224,
|
||
|
|
"step": 210
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.5205811138014528,
|
||
|
|
"grad_norm": 15.08198070526123,
|
||
|
|
"learning_rate": 4.999824890644693e-06,
|
||
|
|
"loss": 0.1655,
|
||
|
|
"num_input_tokens_seen": 184704,
|
||
|
|
"step": 215
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.5326876513317191,
|
||
|
|
"grad_norm": 9.965168952941895,
|
||
|
|
"learning_rate": 4.999485404562269e-06,
|
||
|
|
"loss": 0.3684,
|
||
|
|
"num_input_tokens_seen": 189184,
|
||
|
|
"step": 220
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.5447941888619855,
|
||
|
|
"grad_norm": 7.275655269622803,
|
||
|
|
"learning_rate": 4.998967271222521e-06,
|
||
|
|
"loss": 0.1527,
|
||
|
|
"num_input_tokens_seen": 193536,
|
||
|
|
"step": 225
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.5569007263922519,
|
||
|
|
"grad_norm": 7.0880584716796875,
|
||
|
|
"learning_rate": 4.998270527658311e-06,
|
||
|
|
"loss": 0.1238,
|
||
|
|
"num_input_tokens_seen": 197888,
|
||
|
|
"step": 230
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.5690072639225182,
|
||
|
|
"grad_norm": 27.60887908935547,
|
||
|
|
"learning_rate": 4.997395223668422e-06,
|
||
|
|
"loss": 0.2147,
|
||
|
|
"num_input_tokens_seen": 202112,
|
||
|
|
"step": 235
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.5811138014527845,
|
||
|
|
"grad_norm": 43.02740478515625,
|
||
|
|
"learning_rate": 4.996341421813993e-06,
|
||
|
|
"loss": 0.1162,
|
||
|
|
"num_input_tokens_seen": 206528,
|
||
|
|
"step": 240
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.5932203389830508,
|
||
|
|
"grad_norm": 30.406055450439453,
|
||
|
|
"learning_rate": 4.995109197414051e-06,
|
||
|
|
"loss": 0.1311,
|
||
|
|
"num_input_tokens_seen": 210944,
|
||
|
|
"step": 245
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.6053268765133172,
|
||
|
|
"grad_norm": 14.91820240020752,
|
||
|
|
"learning_rate": 4.9936986385401305e-06,
|
||
|
|
"loss": 0.1437,
|
||
|
|
"num_input_tokens_seen": 215104,
|
||
|
|
"step": 250
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.6174334140435835,
|
||
|
|
"grad_norm": 20.09491729736328,
|
||
|
|
"learning_rate": 4.992109846009972e-06,
|
||
|
|
"loss": 0.1597,
|
||
|
|
"num_input_tokens_seen": 219328,
|
||
|
|
"step": 255
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.6295399515738499,
|
||
|
|
"grad_norm": 6.193624973297119,
|
||
|
|
"learning_rate": 4.990342933380321e-06,
|
||
|
|
"loss": 0.1878,
|
||
|
|
"num_input_tokens_seen": 223680,
|
||
|
|
"step": 260
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.6416464891041163,
|
||
|
|
"grad_norm": 6.540223121643066,
|
||
|
|
"learning_rate": 4.988398026938811e-06,
|
||
|
|
"loss": 0.1445,
|
||
|
|
"num_input_tokens_seen": 227904,
|
||
|
|
"step": 265
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.6537530266343826,
|
||
|
|
"grad_norm": 17.89214515686035,
|
||
|
|
"learning_rate": 4.986275265694935e-06,
|
||
|
|
"loss": 0.0992,
|
||
|
|
"num_input_tokens_seen": 231936,
|
||
|
|
"step": 270
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.6658595641646489,
|
||
|
|
"grad_norm": 0.7999329566955566,
|
||
|
|
"learning_rate": 4.983974801370115e-06,
|
||
|
|
"loss": 0.0608,
|
||
|
|
"num_input_tokens_seen": 236160,
|
||
|
|
"step": 275
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.6779661016949152,
|
||
|
|
"grad_norm": 36.78638458251953,
|
||
|
|
"learning_rate": 4.981496798386849e-06,
|
||
|
|
"loss": 0.2262,
|
||
|
|
"num_input_tokens_seen": 240320,
|
||
|
|
"step": 280
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.6900726392251816,
|
||
|
|
"grad_norm": 18.634634017944336,
|
||
|
|
"learning_rate": 4.9788414338569715e-06,
|
||
|
|
"loss": 0.1165,
|
||
|
|
"num_input_tokens_seen": 244800,
|
||
|
|
"step": 285
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.7021791767554479,
|
||
|
|
"grad_norm": 35.7069091796875,
|
||
|
|
"learning_rate": 4.9760088975689815e-06,
|
||
|
|
"loss": 0.2377,
|
||
|
|
"num_input_tokens_seen": 249152,
|
||
|
|
"step": 290
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.7142857142857143,
|
||
|
|
"grad_norm": 10.031001091003418,
|
||
|
|
"learning_rate": 4.972999391974488e-06,
|
||
|
|
"loss": 0.1377,
|
||
|
|
"num_input_tokens_seen": 253376,
|
||
|
|
"step": 295
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.7263922518159807,
|
||
|
|
"grad_norm": 7.3871612548828125,
|
||
|
|
"learning_rate": 4.969813132173735e-06,
|
||
|
|
"loss": 0.19,
|
||
|
|
"num_input_tokens_seen": 257664,
|
||
|
|
"step": 300
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.738498789346247,
|
||
|
|
"grad_norm": 16.09194564819336,
|
||
|
|
"learning_rate": 4.966450345900229e-06,
|
||
|
|
"loss": 0.1146,
|
||
|
|
"num_input_tokens_seen": 262016,
|
||
|
|
"step": 305
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.7506053268765133,
|
||
|
|
"grad_norm": 7.399415969848633,
|
||
|
|
"learning_rate": 4.962911273504461e-06,
|
||
|
|
"loss": 0.1165,
|
||
|
|
"num_input_tokens_seen": 266432,
|
||
|
|
"step": 310
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.7554479418886199,
|
||
|
|
"eval_loss": 0.11303775012493134,
|
||
|
|
"eval_runtime": 1.7273,
|
||
|
|
"eval_samples_per_second": 212.47,
|
||
|
|
"eval_steps_per_second": 26.631,
|
||
|
|
"num_input_tokens_seen": 267968,
|
||
|
|
"step": 312
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.7627118644067796,
|
||
|
|
"grad_norm": 11.618612289428711,
|
||
|
|
"learning_rate": 4.959196167936729e-06,
|
||
|
|
"loss": 0.181,
|
||
|
|
"num_input_tokens_seen": 270464,
|
||
|
|
"step": 315
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.774818401937046,
|
||
|
|
"grad_norm": 11.343527793884277,
|
||
|
|
"learning_rate": 4.955305294729056e-06,
|
||
|
|
"loss": 0.0946,
|
||
|
|
"num_input_tokens_seen": 274688,
|
||
|
|
"step": 320
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.7869249394673123,
|
||
|
|
"grad_norm": 9.781023025512695,
|
||
|
|
"learning_rate": 4.9512389319762165e-06,
|
||
|
|
"loss": 0.1293,
|
||
|
|
"num_input_tokens_seen": 278848,
|
||
|
|
"step": 325
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.7990314769975787,
|
||
|
|
"grad_norm": 23.389354705810547,
|
||
|
|
"learning_rate": 4.946997370315857e-06,
|
||
|
|
"loss": 0.124,
|
||
|
|
"num_input_tokens_seen": 283136,
|
||
|
|
"step": 330
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.8111380145278451,
|
||
|
|
"grad_norm": 21.524974822998047,
|
||
|
|
"learning_rate": 4.9425809129077204e-06,
|
||
|
|
"loss": 0.1767,
|
||
|
|
"num_input_tokens_seen": 287680,
|
||
|
|
"step": 335
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.8232445520581114,
|
||
|
|
"grad_norm": 12.489716529846191,
|
||
|
|
"learning_rate": 4.937989875411986e-06,
|
||
|
|
"loss": 0.0811,
|
||
|
|
"num_input_tokens_seen": 292224,
|
||
|
|
"step": 340
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.8353510895883777,
|
||
|
|
"grad_norm": 11.355611801147461,
|
||
|
|
"learning_rate": 4.933224585966696e-06,
|
||
|
|
"loss": 0.1567,
|
||
|
|
"num_input_tokens_seen": 296448,
|
||
|
|
"step": 345
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.847457627118644,
|
||
|
|
"grad_norm": 24.681901931762695,
|
||
|
|
"learning_rate": 4.928285385164316e-06,
|
||
|
|
"loss": 0.1363,
|
||
|
|
"num_input_tokens_seen": 300736,
|
||
|
|
"step": 350
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.8595641646489104,
|
||
|
|
"grad_norm": 8.876485824584961,
|
||
|
|
"learning_rate": 4.92317262602738e-06,
|
||
|
|
"loss": 0.1348,
|
||
|
|
"num_input_tokens_seen": 304960,
|
||
|
|
"step": 355
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.8716707021791767,
|
||
|
|
"grad_norm": 20.233808517456055,
|
||
|
|
"learning_rate": 4.917886673983267e-06,
|
||
|
|
"loss": 0.1694,
|
||
|
|
"num_input_tokens_seen": 309184,
|
||
|
|
"step": 360
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.8837772397094431,
|
||
|
|
"grad_norm": 20.60609245300293,
|
||
|
|
"learning_rate": 4.912427906838079e-06,
|
||
|
|
"loss": 0.1352,
|
||
|
|
"num_input_tokens_seen": 313408,
|
||
|
|
"step": 365
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.8958837772397095,
|
||
|
|
"grad_norm": 6.115711688995361,
|
||
|
|
"learning_rate": 4.906796714749635e-06,
|
||
|
|
"loss": 0.0933,
|
||
|
|
"num_input_tokens_seen": 317888,
|
||
|
|
"step": 370
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.9079903147699758,
|
||
|
|
"grad_norm": 18.18195152282715,
|
||
|
|
"learning_rate": 4.900993500199591e-06,
|
||
|
|
"loss": 0.1488,
|
||
|
|
"num_input_tokens_seen": 322048,
|
||
|
|
"step": 375
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.9200968523002422,
|
||
|
|
"grad_norm": 12.584397315979004,
|
||
|
|
"learning_rate": 4.895018677964669e-06,
|
||
|
|
"loss": 0.087,
|
||
|
|
"num_input_tokens_seen": 326592,
|
||
|
|
"step": 380
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.9322033898305084,
|
||
|
|
"grad_norm": 7.245577812194824,
|
||
|
|
"learning_rate": 4.888872675087012e-06,
|
||
|
|
"loss": 0.1017,
|
||
|
|
"num_input_tokens_seen": 330880,
|
||
|
|
"step": 385
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.9443099273607748,
|
||
|
|
"grad_norm": 51.712425231933594,
|
||
|
|
"learning_rate": 4.882555930843664e-06,
|
||
|
|
"loss": 0.1105,
|
||
|
|
"num_input_tokens_seen": 335104,
|
||
|
|
"step": 390
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.9564164648910412,
|
||
|
|
"grad_norm": 25.56722068786621,
|
||
|
|
"learning_rate": 4.876068896715171e-06,
|
||
|
|
"loss": 0.1437,
|
||
|
|
"num_input_tokens_seen": 339392,
|
||
|
|
"step": 395
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.9685230024213075,
|
||
|
|
"grad_norm": 20.534461975097656,
|
||
|
|
"learning_rate": 4.8694120363533105e-06,
|
||
|
|
"loss": 0.146,
|
||
|
|
"num_input_tokens_seen": 343744,
|
||
|
|
"step": 400
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.9806295399515739,
|
||
|
|
"grad_norm": 8.779641151428223,
|
||
|
|
"learning_rate": 4.862585825547957e-06,
|
||
|
|
"loss": 0.0985,
|
||
|
|
"num_input_tokens_seen": 348160,
|
||
|
|
"step": 405
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 0.9927360774818402,
|
||
|
|
"grad_norm": 17.242847442626953,
|
||
|
|
"learning_rate": 4.855590752193075e-06,
|
||
|
|
"loss": 0.116,
|
||
|
|
"num_input_tokens_seen": 352448,
|
||
|
|
"step": 410
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 1.0048426150121066,
|
||
|
|
"grad_norm": 13.249277114868164,
|
||
|
|
"learning_rate": 4.848427316251843e-06,
|
||
|
|
"loss": 0.1193,
|
||
|
|
"num_input_tokens_seen": 356656,
|
||
|
|
"step": 415
|
||
|
|
},
    {
      "epoch": 1.0072639225181599,
      "eval_loss": 0.10842076689004898,
      "eval_runtime": 0.63,
      "eval_samples_per_second": 582.523,
      "eval_steps_per_second": 73.014,
      "num_input_tokens_seen": 357488,
      "step": 416
    },
|
||
|
|
{
|
||
|
|
"epoch": 1.0169491525423728,
|
||
|
|
"grad_norm": 1.1179414987564087,
|
||
|
|
"learning_rate": 4.841096029720921e-06,
|
||
|
|
"loss": 0.073,
|
||
|
|
"num_input_tokens_seen": 360880,
|
||
|
|
"step": 420
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 1.0290556900726393,
|
||
|
|
"grad_norm": 31.719369888305664,
|
||
|
|
"learning_rate": 4.833597416593861e-06,
|
||
|
|
"loss": 0.0535,
|
||
|
|
"num_input_tokens_seen": 365104,
|
||
|
|
"step": 425
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 1.0411622276029056,
|
||
|
|
"grad_norm": 48.02503204345703,
|
||
|
|
"learning_rate": 4.825932012823652e-06,
|
||
|
|
"loss": 0.1458,
|
||
|
|
"num_input_tokens_seen": 369776,
|
||
|
|
"step": 430
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 1.053268765133172,
|
||
|
|
"grad_norm": 97.04767608642578,
|
||
|
|
"learning_rate": 4.818100366284408e-06,
|
||
|
|
"loss": 0.1602,
|
||
|
|
"num_input_tokens_seen": 374000,
|
||
|
|
"step": 435
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 1.0653753026634383,
|
||
|
|
"grad_norm": 37.753238677978516,
|
||
|
|
"learning_rate": 4.81010303673222e-06,
|
||
|
|
"loss": 0.2577,
|
||
|
|
"num_input_tokens_seen": 378096,
|
||
|
|
"step": 440
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 1.0774818401937045,
|
||
|
|
"grad_norm": 0.17760241031646729,
|
||
|
|
"learning_rate": 4.80194059576514e-06,
|
||
|
|
"loss": 0.0566,
|
||
|
|
"num_input_tokens_seen": 382256,
|
||
|
|
"step": 445
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 1.089588377723971,
|
||
|
|
"grad_norm": 34.652015686035156,
|
||
|
|
"learning_rate": 4.793613626782331e-06,
|
||
|
|
"loss": 0.1761,
|
||
|
|
"num_input_tokens_seen": 386672,
|
||
|
|
"step": 450
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 1.1016949152542372,
|
||
|
|
"grad_norm": 28.04759407043457,
|
||
|
|
"learning_rate": 4.785122724942367e-06,
|
||
|
|
"loss": 0.0591,
|
||
|
|
"num_input_tokens_seen": 390960,
|
||
|
|
"step": 455
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 1.1138014527845037,
|
||
|
|
"grad_norm": 2.957566976547241,
|
||
|
|
"learning_rate": 4.7764684971206974e-06,
|
||
|
|
"loss": 0.0952,
|
||
|
|
"num_input_tokens_seen": 395440,
|
||
|
|
"step": 460
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 1.12590799031477,
|
||
|
|
"grad_norm": 44.540069580078125,
|
||
|
|
"learning_rate": 4.767651561866269e-06,
|
||
|
|
"loss": 0.0664,
|
||
|
|
"num_input_tokens_seen": 399600,
|
||
|
|
"step": 465
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 1.1380145278450362,
|
||
|
|
"grad_norm": 24.51837730407715,
|
||
|
|
"learning_rate": 4.758672549357316e-06,
|
||
|
|
"loss": 0.1001,
|
||
|
|
"num_input_tokens_seen": 403888,
|
||
|
|
"step": 470
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 1.1501210653753027,
|
||
|
|
"grad_norm": 40.098114013671875,
|
||
|
|
"learning_rate": 4.7495321013563225e-06,
|
||
|
|
"loss": 0.2506,
|
||
|
|
"num_input_tokens_seen": 408176,
|
||
|
|
"step": 475
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 1.162227602905569,
|
||
|
|
"grad_norm": 2.672497510910034,
|
||
|
|
"learning_rate": 4.740230871164148e-06,
|
||
|
|
"loss": 0.044,
|
||
|
|
"num_input_tokens_seen": 412208,
|
||
|
|
"step": 480
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 1.1743341404358354,
|
||
|
|
"grad_norm": 0.2532678544521332,
|
||
|
|
"learning_rate": 4.730769523573337e-06,
|
||
|
|
"loss": 0.1472,
|
||
|
|
"num_input_tokens_seen": 416624,
|
||
|
|
"step": 485
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 1.1864406779661016,
|
||
|
|
"grad_norm": 4.472592830657959,
|
||
|
|
"learning_rate": 4.721148734820605e-06,
|
||
|
|
"loss": 0.1661,
|
||
|
|
"num_input_tokens_seen": 421040,
|
||
|
|
"step": 490
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 1.1985472154963681,
|
||
|
|
"grad_norm": 37.826171875,
|
||
|
|
"learning_rate": 4.711369192538503e-06,
|
||
|
|
"loss": 0.094,
|
||
|
|
"num_input_tokens_seen": 425136,
|
||
|
|
"step": 495
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 1.2106537530266344,
|
||
|
|
"grad_norm": 14.548481941223145,
|
||
|
|
"learning_rate": 4.701431595706269e-06,
|
||
|
|
"loss": 0.1282,
|
||
|
|
"num_input_tokens_seen": 429680,
|
||
|
|
"step": 500
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 1.2227602905569008,
|
||
|
|
"grad_norm": 24.612041473388672,
|
||
|
|
"learning_rate": 4.691336654599873e-06,
|
||
|
|
"loss": 0.0874,
|
||
|
|
"num_input_tokens_seen": 434224,
|
||
|
|
"step": 505
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 1.234866828087167,
|
||
|
|
"grad_norm": 11.3072509765625,
|
||
|
|
"learning_rate": 4.6810850907412486e-06,
|
||
|
|
"loss": 0.0403,
|
||
|
|
"num_input_tokens_seen": 438320,
|
||
|
|
"step": 510
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 1.2469733656174333,
|
||
|
|
"grad_norm": 9.126791000366211,
|
||
|
|
"learning_rate": 4.6706776368467236e-06,
|
||
|
|
"loss": 0.0227,
|
||
|
|
"num_input_tokens_seen": 442672,
|
||
|
|
"step": 515
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 1.2590799031476998,
|
||
|
|
"grad_norm": 23.92775535583496,
|
||
|
|
"learning_rate": 4.6601150367746485e-06,
|
||
|
|
"loss": 0.0685,
|
||
|
|
"num_input_tokens_seen": 446896,
|
||
|
|
"step": 520
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 1.2590799031476998,
|
||
|
|
"eval_loss": 0.19028596580028534,
|
||
|
|
"eval_runtime": 0.7167,
|
||
|
|
"eval_samples_per_second": 512.081,
|
||
|
|
"eval_steps_per_second": 64.185,
|
||
|
|
"num_input_tokens_seen": 446896,
|
||
|
|
"step": 520
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 1.271186440677966,
|
||
|
|
"grad_norm": 43.593448638916016,
|
||
|
|
"learning_rate": 4.649398045472235e-06,
|
||
|
|
"loss": 0.1008,
|
||
|
|
"num_input_tokens_seen": 451312,
|
||
|
|
"step": 525
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 1.2832929782082325,
|
||
|
|
"grad_norm": 8.737812042236328,
|
||
|
|
"learning_rate": 4.638527428921592e-06,
|
||
|
|
"loss": 0.3076,
|
||
|
|
"num_input_tokens_seen": 455408,
|
||
|
|
"step": 530
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 1.2953995157384988,
|
||
|
|
"grad_norm": 2.4155948162078857,
|
||
|
|
"learning_rate": 4.627503964084981e-06,
|
||
|
|
"loss": 0.0462,
|
||
|
|
"num_input_tokens_seen": 460080,
|
||
|
|
"step": 535
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 1.307506053268765,
|
||
|
|
"grad_norm": 2.872014284133911,
|
||
|
|
"learning_rate": 4.616328438849284e-06,
|
||
|
|
"loss": 0.0124,
|
||
|
|
"num_input_tokens_seen": 464496,
|
||
|
|
"step": 540
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 1.3196125907990315,
|
||
|
|
"grad_norm": 34.14030456542969,
|
||
|
|
"learning_rate": 4.605001651969686e-06,
|
||
|
|
"loss": 0.1408,
|
||
|
|
"num_input_tokens_seen": 468720,
|
||
|
|
"step": 545
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 1.331719128329298,
|
||
|
|
"grad_norm": 59.56473922729492,
|
||
|
|
"learning_rate": 4.5935244130125925e-06,
|
||
|
|
"loss": 0.115,
|
||
|
|
"num_input_tokens_seen": 473264,
|
||
|
|
"step": 550
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 1.3438256658595642,
|
||
|
|
"grad_norm": 1.2305806875228882,
|
||
|
|
"learning_rate": 4.581897542297761e-06,
|
||
|
|
"loss": 0.0061,
|
||
|
|
"num_input_tokens_seen": 477552,
|
||
|
|
"step": 555
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 1.3559322033898304,
|
||
|
|
"grad_norm": 52.619632720947266,
|
||
|
|
"learning_rate": 4.570121870839671e-06,
|
||
|
|
"loss": 0.0843,
|
||
|
|
"num_input_tokens_seen": 482032,
|
||
|
|
"step": 560
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 1.368038740920097,
|
||
|
|
"grad_norm": 97.8170394897461,
|
||
|
|
"learning_rate": 4.558198240288131e-06,
|
||
|
|
"loss": 0.0764,
|
||
|
|
"num_input_tokens_seen": 486384,
|
||
|
|
"step": 565
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 1.3801452784503632,
|
||
|
|
"grad_norm": 35.3338737487793,
|
||
|
|
"learning_rate": 4.5461275028681186e-06,
|
||
|
|
"loss": 0.1836,
|
||
|
|
"num_input_tokens_seen": 490672,
|
||
|
|
"step": 570
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 1.3922518159806296,
|
||
|
|
"grad_norm": 27.42061996459961,
|
||
|
|
"learning_rate": 4.533910521318872e-06,
|
||
|
|
"loss": 0.1097,
|
||
|
|
"num_input_tokens_seen": 494960,
|
||
|
|
"step": 575
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 1.4043583535108959,
|
||
|
|
"grad_norm": 7.799497604370117,
|
||
|
|
"learning_rate": 4.521548168832227e-06,
|
||
|
|
"loss": 0.1144,
|
||
|
|
"num_input_tokens_seen": 499120,
|
||
|
|
"step": 580
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 1.4164648910411621,
|
||
|
|
"grad_norm": 11.070670127868652,
|
||
|
|
"learning_rate": 4.509041328990204e-06,
|
||
|
|
"loss": 0.0169,
|
||
|
|
"num_input_tokens_seen": 503408,
|
||
|
|
"step": 585
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 1.4285714285714286,
|
||
|
|
"grad_norm": 5.323161602020264,
|
||
|
|
"learning_rate": 4.496390895701858e-06,
"loss": 0.0424,
"num_input_tokens_seen": 507312,
"step": 590
},
{
"epoch": 1.4406779661016949,
"grad_norm": 26.43401527404785,
"learning_rate": 4.483597773139387e-06,
"loss": 0.053,
"num_input_tokens_seen": 511600,
"step": 595
},
{
"epoch": 1.4527845036319613,
"grad_norm": 0.05975044146180153,
"learning_rate": 4.470662875673506e-06,
"loss": 0.0615,
"num_input_tokens_seen": 515888,
"step": 600
},
{
"epoch": 1.4648910411622276,
"grad_norm": 52.62843322753906,
"learning_rate": 4.4575871278080964e-06,
"loss": 0.2071,
"num_input_tokens_seen": 519920,
"step": 605
},
{
"epoch": 1.4769975786924938,
"grad_norm": 10.014227867126465,
"learning_rate": 4.444371464114126e-06,
"loss": 0.0688,
"num_input_tokens_seen": 524336,
"step": 610
},
{
"epoch": 1.4891041162227603,
"grad_norm": 0.5611757636070251,
"learning_rate": 4.431016829162851e-06,
"loss": 0.071,
"num_input_tokens_seen": 528496,
"step": 615
},
{
"epoch": 1.5012106537530268,
"grad_norm": 0.10402300208806992,
"learning_rate": 4.417524177458309e-06,
"loss": 0.0801,
"num_input_tokens_seen": 532784,
"step": 620
},
{
"epoch": 1.5108958837772397,
"eval_loss": 0.1981746405363083,
"eval_runtime": 0.6398,
"eval_samples_per_second": 573.626,
"eval_steps_per_second": 71.899,
"num_input_tokens_seen": 536176,
"step": 624
},
{
"epoch": 1.513317191283293,
"grad_norm": 24.07758331298828,
"learning_rate": 4.403894473369092e-06,
"loss": 0.0258,
"num_input_tokens_seen": 537136,
"step": 625
},
{
"epoch": 1.5254237288135593,
"grad_norm": 48.458106994628906,
"learning_rate": 4.390128691059423e-06,
"loss": 0.199,
"num_input_tokens_seen": 541552,
"step": 630
},
{
"epoch": 1.5375302663438255,
"grad_norm": 8.228415489196777,
"learning_rate": 4.376227814419524e-06,
"loss": 0.1964,
"num_input_tokens_seen": 545648,
"step": 635
},
{
"epoch": 1.549636803874092,
"grad_norm": 12.28972339630127,
"learning_rate": 4.3621928369952995e-06,
"loss": 0.06,
"num_input_tokens_seen": 550256,
"step": 640
},
{
"epoch": 1.5617433414043584,
"grad_norm": 15.129716873168945,
"learning_rate": 4.348024761917321e-06,
"loss": 0.1114,
"num_input_tokens_seen": 554928,
"step": 645
},
{
"epoch": 1.5738498789346247,
"grad_norm": 0.27470338344573975,
"learning_rate": 4.333724601829132e-06,
"loss": 0.0725,
"num_input_tokens_seen": 559344,
"step": 650
},
{
"epoch": 1.585956416464891,
"grad_norm": 0.1773267388343811,
"learning_rate": 4.319293378814868e-06,
"loss": 0.1308,
"num_input_tokens_seen": 563760,
"step": 655
},
{
"epoch": 1.5980629539951574,
"grad_norm": 14.22355842590332,
"learning_rate": 4.3047321243262065e-06,
"loss": 0.0653,
"num_input_tokens_seen": 568112,
"step": 660
},
{
"epoch": 1.6101694915254239,
"grad_norm": 0.1500890851020813,
"learning_rate": 4.290041879108641e-06,
"loss": 0.006,
"num_input_tokens_seen": 572464,
"step": 665
},
{
"epoch": 1.6222760290556901,
"grad_norm": 30.997364044189453,
"learning_rate": 4.275223693127103e-06,
"loss": 0.0771,
"num_input_tokens_seen": 576752,
"step": 670
},
{
"epoch": 1.6343825665859564,
"grad_norm": 44.13563919067383,
"learning_rate": 4.260278625490911e-06,
"loss": 0.034,
"num_input_tokens_seen": 580976,
"step": 675
},
{
"epoch": 1.6464891041162226,
"grad_norm": 1.0693022012710571,
"learning_rate": 4.245207744378075e-06,
"loss": 0.1429,
"num_input_tokens_seen": 585264,
"step": 680
},
{
"epoch": 1.658595641646489,
"grad_norm": 32.29472351074219,
"learning_rate": 4.2300121269589475e-06,
"loss": 0.0664,
"num_input_tokens_seen": 589744,
"step": 685
},
{
"epoch": 1.6707021791767556,
"grad_norm": 40.11149597167969,
"learning_rate": 4.2146928593192375e-06,
"loss": 0.0792,
"num_input_tokens_seen": 593968,
"step": 690
},
{
"epoch": 1.6828087167070218,
"grad_norm": 43.619258880615234,
"learning_rate": 4.19925103638238e-06,
"loss": 0.1061,
"num_input_tokens_seen": 598256,
"step": 695
},
{
"epoch": 1.694915254237288,
"grad_norm": 0.6466928720474243,
"learning_rate": 4.183687761831282e-06,
"loss": 0.0958,
"num_input_tokens_seen": 602608,
"step": 700
},
{
"epoch": 1.7070217917675545,
"grad_norm": 27.355270385742188,
"learning_rate": 4.168004148029435e-06,
"loss": 0.1234,
"num_input_tokens_seen": 607088,
"step": 705
},
{
"epoch": 1.7191283292978208,
"grad_norm": 17.83440399169922,
"learning_rate": 4.152201315941414e-06,
"loss": 0.1094,
"num_input_tokens_seen": 611248,
"step": 710
},
{
"epoch": 1.7312348668280872,
"grad_norm": 1.9912621974945068,
"learning_rate": 4.136280395052754e-06,
"loss": 0.1047,
"num_input_tokens_seen": 615536,
"step": 715
},
{
"epoch": 1.7433414043583535,
"grad_norm": 7.247110843658447,
"learning_rate": 4.120242523289223e-06,
"loss": 0.0341,
"num_input_tokens_seen": 619952,
"step": 720
},
{
"epoch": 1.7554479418886197,
"grad_norm": 42.79171371459961,
"learning_rate": 4.104088846935493e-06,
"loss": 0.2066,
"num_input_tokens_seen": 624368,
"step": 725
},
{
"epoch": 1.7627118644067796,
"eval_loss": 0.14485575258731842,
"eval_runtime": 0.6316,
"eval_samples_per_second": 581.029,
"eval_steps_per_second": 72.826,
"num_input_tokens_seen": 626992,
"step": 728
},
{
"epoch": 1.7675544794188862,
"grad_norm": 15.863067626953125,
"learning_rate": 4.087820520553205e-06,
"loss": 0.0104,
"num_input_tokens_seen": 628720,
"step": 730
},
{
"epoch": 1.7796610169491527,
"grad_norm": 4.8767571449279785,
"learning_rate": 4.071438706898457e-06,
"loss": 0.0572,
"num_input_tokens_seen": 633008,
"step": 735
},
{
"epoch": 1.791767554479419,
"grad_norm": 8.256195068359375,
"learning_rate": 4.0549445768386895e-06,
"loss": 0.1222,
"num_input_tokens_seen": 637360,
"step": 740
},
{
"epoch": 1.8038740920096852,
"grad_norm": 40.003360748291016,
"learning_rate": 4.038339309269002e-06,
"loss": 0.1171,
"num_input_tokens_seen": 641648,
"step": 745
},
{
"epoch": 1.8159806295399514,
"grad_norm": 1.1402125358581543,
"learning_rate": 4.021624091027895e-06,
"loss": 0.1638,
"num_input_tokens_seen": 645552,
"step": 750
},
{
"epoch": 1.828087167070218,
"grad_norm": 19.136348724365234,
"learning_rate": 4.00480011681244e-06,
"loss": 0.1092,
"num_input_tokens_seen": 649904,
"step": 755
},
{
"epoch": 1.8401937046004844,
"grad_norm": 16.968360900878906,
"learning_rate": 3.987868589092894e-06,
"loss": 0.1118,
"num_input_tokens_seen": 654128,
"step": 760
},
{
"epoch": 1.8523002421307506,
"grad_norm": 15.944784164428711,
"learning_rate": 3.970830718026746e-06,
"loss": 0.1015,
"num_input_tokens_seen": 658672,
"step": 765
},
{
"epoch": 1.8644067796610169,
"grad_norm": 20.015836715698242,
"learning_rate": 3.9536877213722335e-06,
"loss": 0.1207,
"num_input_tokens_seen": 663088,
"step": 770
},
{
"epoch": 1.8765133171912833,
"grad_norm": 30.892627716064453,
"learning_rate": 3.936440824401299e-06,
"loss": 0.083,
"num_input_tokens_seen": 667440,
"step": 775
},
{
"epoch": 1.8886198547215496,
"grad_norm": 19.33950424194336,
"learning_rate": 3.919091259812013e-06,
"loss": 0.0249,
"num_input_tokens_seen": 671792,
"step": 780
},
{
"epoch": 1.900726392251816,
"grad_norm": 24.891815185546875,
"learning_rate": 3.901640267640475e-06,
"loss": 0.0425,
"num_input_tokens_seen": 676336,
"step": 785
},
{
"epoch": 1.9128329297820823,
"grad_norm": 28.49574089050293,
"learning_rate": 3.884089095172181e-06,
"loss": 0.0402,
"num_input_tokens_seen": 680624,
"step": 790
},
{
"epoch": 1.9249394673123486,
"grad_norm": 0.016377810388803482,
"learning_rate": 3.866438996852873e-06,
"loss": 0.0155,
"num_input_tokens_seen": 685040,
"step": 795
},
{
"epoch": 1.937046004842615,
"grad_norm": 0.16267594695091248,
"learning_rate": 3.848691234198879e-06,
"loss": 0.0372,
"num_input_tokens_seen": 689392,
"step": 800
},
{
"epoch": 1.9491525423728815,
"grad_norm": 1.1569898128509521,
"learning_rate": 3.830847075706957e-06,
"loss": 0.1257,
"num_input_tokens_seen": 693552,
"step": 805
},
{
"epoch": 1.9612590799031477,
"grad_norm": 1.0089043378829956,
"learning_rate": 3.812907796763616e-06,
"loss": 0.0454,
"num_input_tokens_seen": 698032,
"step": 810
},
{
"epoch": 1.973365617433414,
"grad_norm": 0.9869695901870728,
"learning_rate": 3.794874679553975e-06,
"loss": 0.2136,
"num_input_tokens_seen": 702000,
"step": 815
},
{
"epoch": 1.9854721549636802,
"grad_norm": 23.745838165283203,
"learning_rate": 3.7767490129701057e-06,
"loss": 0.1643,
"num_input_tokens_seen": 706160,
"step": 820
},
{
"epoch": 1.9975786924939467,
"grad_norm": 4.922607898712158,
"learning_rate": 3.7585320925189246e-06,
"loss": 0.0475,
"num_input_tokens_seen": 710768,
"step": 825
},
{
"epoch": 2.009685230024213,
"grad_norm": 0.3702712655067444,
"learning_rate": 3.7402252202295876e-06,
"loss": 0.0011,
"num_input_tokens_seen": 714744,
"step": 830
},
{
"epoch": 2.0145278450363198,
"eval_loss": 0.2067757099866867,
"eval_runtime": 0.634,
"eval_samples_per_second": 578.871,
"eval_steps_per_second": 72.556,
"num_input_tokens_seen": 716344,
"step": 832
},
{
"epoch": 2.0217917675544794,
"grad_norm": 66.86534118652344,
"learning_rate": 3.7218297045604362e-06,
"loss": 0.0057,
"num_input_tokens_seen": 718776,
"step": 835
},
{
"epoch": 2.0338983050847457,
"grad_norm": 0.06272957473993301,
"learning_rate": 3.703346860305473e-06,
"loss": 0.0114,
"num_input_tokens_seen": 722744,
"step": 840
},
{
"epoch": 2.046004842615012,
"grad_norm": 0.00682666152715683,
"learning_rate": 3.6847780085003908e-06,
"loss": 0.0047,
"num_input_tokens_seen": 727160,
"step": 845
},
{
"epoch": 2.0581113801452786,
"grad_norm": 60.80389404296875,
"learning_rate": 3.666124476328155e-06,
"loss": 0.0867,
"num_input_tokens_seen": 731576,
"step": 850
},
{
"epoch": 2.070217917675545,
"grad_norm": 15.216704368591309,
"learning_rate": 3.647387597024139e-06,
"loss": 0.0084,
"num_input_tokens_seen": 736184,
"step": 855
},
{
"epoch": 2.082324455205811,
"grad_norm": 0.1580037623643875,
"learning_rate": 3.6285687097808396e-06,
"loss": 0.0011,
"num_input_tokens_seen": 740472,
"step": 860
},
{
"epoch": 2.0944309927360774,
"grad_norm": 56.17728805541992,
"learning_rate": 3.609669159652158e-06,
"loss": 0.0528,
"num_input_tokens_seen": 744760,
"step": 865
},
{
"epoch": 2.106537530266344,
"grad_norm": 0.03734096884727478,
"learning_rate": 3.5906902974572623e-06,
"loss": 0.0003,
"num_input_tokens_seen": 749176,
"step": 870
},
{
"epoch": 2.1186440677966103,
"grad_norm": 0.013891610316932201,
"learning_rate": 3.5716334796840403e-06,
"loss": 0.0329,
"num_input_tokens_seen": 753528,
"step": 875
},
{
"epoch": 2.1307506053268765,
"grad_norm": 0.11342939734458923,
"learning_rate": 3.5525000683921467e-06,
"loss": 0.0022,
"num_input_tokens_seen": 757688,
"step": 880
},
{
"epoch": 2.142857142857143,
"grad_norm": 0.012256304733455181,
"learning_rate": 3.533291431115653e-06,
"loss": 0.0268,
"num_input_tokens_seen": 762040,
"step": 885
},
{
"epoch": 2.154963680387409,
"grad_norm": 55.32374572753906,
"learning_rate": 3.514008940765304e-06,
"loss": 0.0746,
"num_input_tokens_seen": 766200,
"step": 890
},
{
"epoch": 2.1670702179176757,
"grad_norm": 0.006180486176162958,
"learning_rate": 3.494653975530388e-06,
"loss": 0.0202,
"num_input_tokens_seen": 770680,
"step": 895
},
{
"epoch": 2.179176755447942,
"grad_norm": 0.8855127096176147,
"learning_rate": 3.475227918780239e-06,
"loss": 0.0023,
"num_input_tokens_seen": 774840,
"step": 900
},
{
"epoch": 2.1912832929782082,
"grad_norm": 0.0062964423559606075,
"learning_rate": 3.455732158965356e-06,
"loss": 0.0001,
"num_input_tokens_seen": 779192,
"step": 905
},
{
"epoch": 2.2033898305084745,
"grad_norm": 0.005455203354358673,
"learning_rate": 3.436168089518168e-06,
"loss": 0.0001,
"num_input_tokens_seen": 783608,
"step": 910
},
{
"epoch": 2.2154963680387407,
"grad_norm": 40.38529968261719,
"learning_rate": 3.4165371087534428e-06,
"loss": 0.0365,
"num_input_tokens_seen": 788088,
"step": 915
},
{
"epoch": 2.2276029055690074,
"grad_norm": 0.01571381278336048,
"learning_rate": 3.396840619768338e-06,
"loss": 0.0,
"num_input_tokens_seen": 792568,
"step": 920
},
{
"epoch": 2.2397094430992737,
"grad_norm": 0.002371912356466055,
"learning_rate": 3.377080030342125e-06,
"loss": 0.0001,
"num_input_tokens_seen": 797176,
"step": 925
},
{
"epoch": 2.25181598062954,
"grad_norm": 31.288312911987305,
"learning_rate": 3.3572567528355614e-06,
"loss": 0.0038,
"num_input_tokens_seen": 801400,
"step": 930
},
{
"epoch": 2.263922518159806,
"grad_norm": 0.009536500088870525,
"learning_rate": 3.3373722040899515e-06,
"loss": 0.0059,
"num_input_tokens_seen": 805944,
"step": 935
},
{
"epoch": 2.2663438256658597,
"eval_loss": 0.26913806796073914,
"eval_runtime": 0.697,
"eval_samples_per_second": 526.575,
"eval_steps_per_second": 66.001,
"num_input_tokens_seen": 806712,
"step": 936
},
{
"epoch": 2.2760290556900724,
"grad_norm": 0.0007474619778804481,
"learning_rate": 3.3174278053258753e-06,
"loss": 0.0006,
"num_input_tokens_seen": 810040,
"step": 940
},
{
"epoch": 2.288135593220339,
"grad_norm": 42.942989349365234,
"learning_rate": 3.2974249820416094e-06,
"loss": 0.0482,
"num_input_tokens_seen": 814392,
"step": 945
},
{
"epoch": 2.3002421307506054,
"grad_norm": 62.229331970214844,
"learning_rate": 3.2773651639112432e-06,
"loss": 0.0175,
"num_input_tokens_seen": 818872,
"step": 950
},
{
"epoch": 2.3123486682808716,
"grad_norm": 0.0007935139001347125,
"learning_rate": 3.2572497846824922e-06,
"loss": 0.0039,
"num_input_tokens_seen": 823096,
"step": 955
},
{
"epoch": 2.324455205811138,
"grad_norm": 2.6152658462524414,
"learning_rate": 3.2370802820742273e-06,
"loss": 0.0549,
"num_input_tokens_seen": 827128,
"step": 960
},
{
"epoch": 2.3365617433414045,
"grad_norm": 0.9258336424827576,
"learning_rate": 3.2168580976737105e-06,
"loss": 0.0011,
"num_input_tokens_seen": 831288,
"step": 965
},
{
"epoch": 2.348668280871671,
"grad_norm": 0.28666022419929504,
"learning_rate": 3.1965846768335625e-06,
"loss": 0.0202,
"num_input_tokens_seen": 835640,
"step": 970
},
{
"epoch": 2.360774818401937,
"grad_norm": 0.0008573018712922931,
"learning_rate": 3.176261468568457e-06,
"loss": 0.0019,
"num_input_tokens_seen": 839736,
"step": 975
},
{
"epoch": 2.3728813559322033,
"grad_norm": 55.564476013183594,
"learning_rate": 3.155889925451557e-06,
"loss": 0.0363,
"num_input_tokens_seen": 844024,
"step": 980
},
{
"epoch": 2.38498789346247,
"grad_norm": 0.025824718177318573,
"learning_rate": 3.1354715035106892e-06,
"loss": 0.0001,
"num_input_tokens_seen": 848248,
"step": 985
},
{
"epoch": 2.3970944309927362,
"grad_norm": 0.0016786637715995312,
"learning_rate": 3.115007662124282e-06,
"loss": 0.0,
"num_input_tokens_seen": 852472,
"step": 990
},
{
"epoch": 2.4092009685230025,
"grad_norm": 0.014527814462780952,
"learning_rate": 3.0944998639170544e-06,
"loss": 0.0006,
"num_input_tokens_seen": 856824,
"step": 995
},
{
"epoch": 2.4213075060532687,
"grad_norm": 4.990849018096924,
"learning_rate": 3.0739495746554785e-06,
"loss": 0.0018,
"num_input_tokens_seen": 860984,
"step": 1000
},
{
"epoch": 2.433414043583535,
"grad_norm": 0.5683162808418274,
"learning_rate": 3.0533582631430153e-06,
"loss": 0.068,
"num_input_tokens_seen": 865272,
"step": 1005
},
{
"epoch": 2.4455205811138017,
"grad_norm": 5.13648796081543,
"learning_rate": 3.0327274011151355e-06,
"loss": 0.0395,
"num_input_tokens_seen": 869560,
"step": 1010
},
{
"epoch": 2.457627118644068,
"grad_norm": 0.0019914067815989256,
"learning_rate": 3.012058463134126e-06,
"loss": 0.0,
"num_input_tokens_seen": 873976,
"step": 1015
},
{
"epoch": 2.469733656174334,
"grad_norm": 0.0011213916586712003,
"learning_rate": 2.991352926483702e-06,
"loss": 0.0,
"num_input_tokens_seen": 878200,
"step": 1020
},
{
"epoch": 2.4818401937046004,
"grad_norm": 0.0030807037837803364,
"learning_rate": 2.9706122710634166e-06,
"loss": 0.0008,
"num_input_tokens_seen": 882872,
"step": 1025
},
{
"epoch": 2.4939467312348667,
"grad_norm": 0.0031619234941899776,
"learning_rate": 2.949837979282889e-06,
"loss": 0.0,
"num_input_tokens_seen": 887096,
"step": 1030
},
{
"epoch": 2.5060532687651333,
"grad_norm": 29.275707244873047,
"learning_rate": 2.9290315359558504e-06,
"loss": 0.0032,
"num_input_tokens_seen": 891576,
"step": 1035
},
{
"epoch": 2.5181598062953996,
"grad_norm": 0.013175534084439278,
"learning_rate": 2.908194428194019e-06,
"loss": 0.0756,
"num_input_tokens_seen": 895736,
"step": 1040
},
{
"epoch": 2.5181598062953996,
"eval_loss": 0.28947436809539795,
"eval_runtime": 0.636,
"eval_samples_per_second": 577.029,
"eval_steps_per_second": 72.325,
"num_input_tokens_seen": 895736,
"step": 1040
},
{
"epoch": 2.530266343825666,
"grad_norm": 0.002881130203604698,
"learning_rate": 2.88732814530081e-06,
"loss": 0.0001,
"num_input_tokens_seen": 900024,
"step": 1045
},
{
"epoch": 2.542372881355932,
"grad_norm": 122.53758239746094,
"learning_rate": 2.8664341786648932e-06,
"loss": 0.0128,
"num_input_tokens_seen": 904440,
"step": 1050
},
{
"epoch": 2.5544794188861983,
"grad_norm": 0.0015194405568763614,
"learning_rate": 2.845514021653595e-06,
"loss": 0.0001,
"num_input_tokens_seen": 908728,
"step": 1055
},
{
"epoch": 2.566585956416465,
"grad_norm": 0.13878583908081055,
"learning_rate": 2.8245691695061605e-06,
"loss": 0.0443,
"num_input_tokens_seen": 913016,
"step": 1060
},
{
"epoch": 2.5786924939467313,
"grad_norm": 26.11018180847168,
"learning_rate": 2.8036011192268863e-06,
"loss": 0.0032,
"num_input_tokens_seen": 917304,
"step": 1065
},
{
"epoch": 2.5907990314769975,
"grad_norm": 0.003500137245282531,
"learning_rate": 2.7826113694781254e-06,
"loss": 0.0001,
"num_input_tokens_seen": 921528,
"step": 1070
},
{
"epoch": 2.6029055690072638,
"grad_norm": 0.00933838915079832,
"learning_rate": 2.7616014204731683e-06,
"loss": 0.0,
"num_input_tokens_seen": 925944,
"step": 1075
},
{
"epoch": 2.61501210653753,
"grad_norm": 0.0023421638179570436,
"learning_rate": 2.7405727738690193e-06,
"loss": 0.0001,
"num_input_tokens_seen": 930744,
"step": 1080
},
{
"epoch": 2.6271186440677967,
"grad_norm": 0.009054594673216343,
"learning_rate": 2.7195269326590685e-06,
"loss": 0.0725,
"num_input_tokens_seen": 935352,
"step": 1085
},
{
"epoch": 2.639225181598063,
"grad_norm": 0.02030082233250141,
"learning_rate": 2.698465401065667e-06,
"loss": 0.0295,
"num_input_tokens_seen": 939640,
"step": 1090
},
{
"epoch": 2.651331719128329,
"grad_norm": 0.27530986070632935,
"learning_rate": 2.6773896844326126e-06,
"loss": 0.0001,
"num_input_tokens_seen": 943672,
"step": 1095
},
{
"epoch": 2.663438256658596,
"grad_norm": 0.03071773052215576,
"learning_rate": 2.656301289117561e-06,
"loss": 0.0001,
"num_input_tokens_seen": 947704,
"step": 1100
},
{
"epoch": 2.6755447941888617,
"grad_norm": 0.014886329881846905,
"learning_rate": 2.6352017223843584e-06,
"loss": 0.0196,
"num_input_tokens_seen": 951928,
"step": 1105
},
{
"epoch": 2.6876513317191284,
"grad_norm": 0.06265253573656082,
"learning_rate": 2.6140924922953125e-06,
"loss": 0.0294,
"num_input_tokens_seen": 956216,
"step": 1110
},
{
"epoch": 2.6997578692493946,
"grad_norm": 0.04291946068406105,
"learning_rate": 2.592975107603406e-06,
"loss": 0.0001,
"num_input_tokens_seen": 960504,
"step": 1115
},
{
"epoch": 2.711864406779661,
"grad_norm": 0.01645965874195099,
"learning_rate": 2.571851077644461e-06,
"loss": 0.0135,
"num_input_tokens_seen": 965048,
"step": 1120
},
{
"epoch": 2.7239709443099276,
"grad_norm": 0.007254968397319317,
"learning_rate": 2.55072191222926e-06,
"loss": 0.0001,
"num_input_tokens_seen": 969208,
"step": 1125
},
{
"epoch": 2.736077481840194,
"grad_norm": 0.114701047539711,
"learning_rate": 2.5295891215356362e-06,
"loss": 0.0991,
"num_input_tokens_seen": 973624,
"step": 1130
},
{
"epoch": 2.74818401937046,
"grad_norm": 0.0054718079045414925,
"learning_rate": 2.5084542160005338e-06,
"loss": 0.0064,
"num_input_tokens_seen": 977976,
"step": 1135
},
{
"epoch": 2.7602905569007263,
"grad_norm": 0.28143173456192017,
"learning_rate": 2.4873187062120515e-06,
"loss": 0.0001,
"num_input_tokens_seen": 982200,
"step": 1140
},
{
"epoch": 2.7699757869249395,
"eval_loss": 0.22601255774497986,
"eval_runtime": 0.6803,
"eval_samples_per_second": 539.434,
"eval_steps_per_second": 67.613,
"num_input_tokens_seen": 985592,
"step": 1144
},
{
"epoch": 2.7723970944309926,
"grad_norm": 0.02263481356203556,
"learning_rate": 2.4661841028014786e-06,
"loss": 0.0002,
"num_input_tokens_seen": 986488,
"step": 1145
},
{
"epoch": 2.7845036319612593,
"grad_norm": 0.00884742010384798,
"learning_rate": 2.445051916335321e-06,
"loss": 0.0002,
"num_input_tokens_seen": 990456,
"step": 1150
},
{
"epoch": 2.7966101694915255,
"grad_norm": 0.019946428015828133,
"learning_rate": 2.4239236572073354e-06,
"loss": 0.0766,
"num_input_tokens_seen": 994744,
"step": 1155
},
{
"epoch": 2.8087167070217918,
"grad_norm": 0.010249280370771885,
"learning_rate": 2.4028008355305817e-06,
"loss": 0.0501,
"num_input_tokens_seen": 999160,
"step": 1160
},
{
"epoch": 2.820823244552058,
"grad_norm": 56.294708251953125,
"learning_rate": 2.3816849610294784e-06,
"loss": 0.0289,
"num_input_tokens_seen": 1003256,
"step": 1165
},
{
"epoch": 2.8329297820823243,
"grad_norm": 0.007834532298147678,
"learning_rate": 2.3605775429319115e-06,
"loss": 0.0884,
"num_input_tokens_seen": 1007480,
"step": 1170
},
{
"epoch": 2.845036319612591,
"grad_norm": 0.02283744513988495,
"learning_rate": 2.3394800898613536e-06,
"loss": 0.0004,
"num_input_tokens_seen": 1011896,
"step": 1175
},
{
"epoch": 2.857142857142857,
"grad_norm": 0.05437995120882988,
"learning_rate": 2.318394109729041e-06,
"loss": 0.0004,
"num_input_tokens_seen": 1015992,
"step": 1180
},
{
"epoch": 2.8692493946731235,
"grad_norm": 0.41270506381988525,
"learning_rate": 2.297321109626198e-06,
"loss": 0.003,
"num_input_tokens_seen": 1020408,
"step": 1185
},
{
"epoch": 2.8813559322033897,
"grad_norm": 0.04989304393529892,
"learning_rate": 2.27626259571632e-06,
"loss": 0.0003,
"num_input_tokens_seen": 1025016,
"step": 1190
},
{
"epoch": 2.893462469733656,
"grad_norm": 0.09522224217653275,
"learning_rate": 2.2552200731275215e-06,
"loss": 0.0571,
"num_input_tokens_seen": 1029368,
"step": 1195
},
{
"epoch": 2.9055690072639226,
"grad_norm": 3.600926160812378,
"learning_rate": 2.2341950458449576e-06,
"loss": 0.0007,
"num_input_tokens_seen": 1033592,
"step": 1200
},
{
"epoch": 2.917675544794189,
"grad_norm": 0.005034204572439194,
"learning_rate": 2.2131890166033333e-06,
"loss": 0.0001,
"num_input_tokens_seen": 1037688,
"step": 1205
},
{
"epoch": 2.929782082324455,
"grad_norm": 0.01115792989730835,
"learning_rate": 2.1922034867794923e-06,
"loss": 0.0136,
"num_input_tokens_seen": 1041912,
"step": 1210
},
{
"epoch": 2.9418886198547214,
"grad_norm": 0.01456889882683754,
"learning_rate": 2.171239956285115e-06,
"loss": 0.0003,
"num_input_tokens_seen": 1046392,
"step": 1215
},
{
"epoch": 2.9539951573849876,
"grad_norm": 0.009599031880497932,
"learning_rate": 2.150299923459505e-06,
"loss": 0.0001,
"num_input_tokens_seen": 1050616,
"step": 1220
},
{
"epoch": 2.9661016949152543,
"grad_norm": 0.04260379076004028,
"learning_rate": 2.1293848849625065e-06,
"loss": 0.0001,
"num_input_tokens_seen": 1054840,
"step": 1225
},
{
"epoch": 2.9782082324455206,
"grad_norm": 0.006233742460608482,
"learning_rate": 2.108496335667527e-06,
"loss": 0.0001,
"num_input_tokens_seen": 1058936,
"step": 1230
},
{
"epoch": 2.990314769975787,
"grad_norm": 0.005781834479421377,
"learning_rate": 2.0876357685546942e-06,
"loss": 0.0001,
"num_input_tokens_seen": 1063288,
"step": 1235
},
{
"epoch": 3.002421307506053,
"grad_norm": 0.004296495113521814,
"learning_rate": 2.0668046746041497e-06,
"loss": 0.0,
"num_input_tokens_seen": 1067392,
"step": 1240
},
{
"epoch": 3.0145278450363198,
"grad_norm": 0.05696773901581764,
"learning_rate": 2.0460045426894816e-06,
"loss": 0.0,
"num_input_tokens_seen": 1071872,
"step": 1245
},
{
"epoch": 3.0217917675544794,
"eval_loss": 0.22526989877223969,
"eval_runtime": 0.6509,
"eval_samples_per_second": 563.818,
"eval_steps_per_second": 70.669,
"num_input_tokens_seen": 1074624,
"step": 1248
},
{
"epoch": 3.026634382566586,
"grad_norm": 0.033577512949705124,
"learning_rate": 2.0252368594713083e-06,
"loss": 0.0,
"num_input_tokens_seen": 1076416,
"step": 1250
},
{
"epoch": 3.0387409200968523,
"grad_norm": 8.000839233398438,
"learning_rate": 2.004503109291023e-06,
"loss": 0.0024,
"num_input_tokens_seen": 1080512,
"step": 1255
},
{
"epoch": 3.0508474576271185,
"grad_norm": 0.0029002963565289974,
"learning_rate": 1.9838047740647024e-06,
"loss": 0.0,
"num_input_tokens_seen": 1084608,
"step": 1260
},
{
"epoch": 3.062953995157385,
"grad_norm": 0.0019197538495063782,
"learning_rate": 1.9631433331771886e-06,
"loss": 0.0,
"num_input_tokens_seen": 1089024,
"step": 1265
},
{
"epoch": 3.0750605326876514,
"grad_norm": 0.1197233721613884,
"learning_rate": 1.942520263376351e-06,
"loss": 0.0,
"num_input_tokens_seen": 1093376,
"step": 1270
},
{
"epoch": 3.0871670702179177,
"grad_norm": 0.005993293132632971,
"learning_rate": 1.921937038667539e-06,
"loss": 0.0,
"num_input_tokens_seen": 1097728,
"step": 1275
},
{
"epoch": 3.099273607748184,
"grad_norm": 8.694519996643066,
"learning_rate": 1.901395130208229e-06,
"loss": 0.0831,
"num_input_tokens_seen": 1101888,
"step": 1280
},
{
"epoch": 3.11138014527845,
"grad_norm": 0.007167529780417681,
"learning_rate": 1.880896006202876e-06,
"loss": 0.0,
"num_input_tokens_seen": 1106176,
"step": 1285
},
{
"epoch": 3.123486682808717,
"grad_norm": 0.014599725604057312,
"learning_rate": 1.860441131797977e-06,
"loss": 0.0,
"num_input_tokens_seen": 1110272,
"step": 1290
},
{
"epoch": 3.135593220338983,
"grad_norm": 0.016075119376182556,
"learning_rate": 1.8400319689773474e-06,
|
||
|
|
"loss": 0.0001,
|
||
|
|
"num_input_tokens_seen": 1114496,
|
||
|
|
"step": 1295
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.1476997578692494,
|
||
|
|
"grad_norm": 0.04632039740681648,
|
||
|
|
"learning_rate": 1.8196699764576316e-06,
|
||
|
|
"loss": 0.0001,
|
||
|
|
"num_input_tokens_seen": 1118784,
|
||
|
|
"step": 1300
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.1598062953995156,
|
||
|
|
"grad_norm": 0.014973443932831287,
|
||
|
|
"learning_rate": 1.7993566095840442e-06,
|
||
|
|
"loss": 0.0001,
|
||
|
|
"num_input_tokens_seen": 1123008,
|
||
|
|
"step": 1305
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.171912832929782,
|
||
|
|
"grad_norm": 0.017391176894307137,
|
||
|
|
"learning_rate": 1.7790933202263437e-06,
|
||
|
|
"loss": 0.0001,
|
||
|
|
"num_input_tokens_seen": 1127424,
|
||
|
|
"step": 1310
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.1840193704600486,
|
||
|
|
"grad_norm": 0.0033686573151499033,
|
||
|
|
"learning_rate": 1.7588815566750728e-06,
|
||
|
|
"loss": 0.0001,
|
||
|
|
"num_input_tokens_seen": 1131840,
|
||
|
|
"step": 1315
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.196125907990315,
|
||
|
|
"grad_norm": 0.012609965167939663,
|
||
|
|
"learning_rate": 1.7387227635380362e-06,
|
||
|
|
"loss": 0.0001,
|
||
|
|
"num_input_tokens_seen": 1136192,
|
||
|
|
"step": 1320
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.208232445520581,
|
||
|
|
"grad_norm": 0.12501056492328644,
|
||
|
|
"learning_rate": 1.7186183816370522e-06,
|
||
|
|
"loss": 0.0001,
|
||
|
|
"num_input_tokens_seen": 1140544,
|
||
|
|
"step": 1325
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.2203389830508473,
|
||
|
|
"grad_norm": 0.0041845571249723434,
|
||
|
|
"learning_rate": 1.6985698479049703e-06,
|
||
|
|
"loss": 0.0001,
|
||
|
|
"num_input_tokens_seen": 1145280,
|
||
|
|
"step": 1330
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.232445520581114,
|
||
|
|
"grad_norm": 0.0181956198066473,
|
||
|
|
"learning_rate": 1.6785785952829718e-06,
|
||
|
|
"loss": 0.0039,
|
||
|
|
"num_input_tokens_seen": 1149888,
|
||
|
|
"step": 1335
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.2445520581113803,
|
||
|
|
"grad_norm": 0.005230342503637075,
|
||
|
|
"learning_rate": 1.6586460526181476e-06,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1153920,
|
||
|
|
"step": 1340
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.2566585956416465,
|
||
|
|
"grad_norm": 0.0063026342540979385,
|
||
|
|
"learning_rate": 1.6387736445613772e-06,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1158592,
|
||
|
|
"step": 1345
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.2687651331719128,
|
||
|
|
"grad_norm": 0.004648419097065926,
|
||
|
|
"learning_rate": 1.618962791465501e-06,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1162816,
|
||
|
|
"step": 1350
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.2736077481840193,
|
||
|
|
"eval_loss": 0.25782492756843567,
|
||
|
|
"eval_runtime": 0.6532,
|
||
|
|
"eval_samples_per_second": 561.843,
|
||
|
|
"eval_steps_per_second": 70.422,
|
||
|
|
"num_input_tokens_seen": 1164544,
|
||
|
|
"step": 1352
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.280871670702179,
|
||
|
|
"grad_norm": 0.003744626184925437,
|
||
|
|
"learning_rate": 1.599214909283805e-06,
|
||
|
|
"loss": 0.0002,
|
||
|
|
"num_input_tokens_seen": 1167232,
|
||
|
|
"step": 1355
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.2929782082324457,
|
||
|
|
"grad_norm": 0.002472149208188057,
|
||
|
|
"learning_rate": 1.579531409468815e-06,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1171648,
|
||
|
|
"step": 1360
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.305084745762712,
|
||
|
|
"grad_norm": 0.00250844843685627,
|
||
|
|
"learning_rate": 1.5599136988714186e-06,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1175808,
|
||
|
|
"step": 1365
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.317191283292978,
|
||
|
|
"grad_norm": 0.0014152796939015388,
|
||
|
|
"learning_rate": 1.5403631796403085e-06,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1180224,
|
||
|
|
"step": 1370
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.3292978208232444,
|
||
|
|
"grad_norm": 0.00422420259565115,
|
||
|
|
"learning_rate": 1.5208812491217669e-06,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1184704,
|
||
|
|
"step": 1375
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.341404358353511,
|
||
|
|
"grad_norm": 0.004841359332203865,
|
||
|
|
"learning_rate": 1.5014692997597962e-06,
|
||
|
|
"loss": 0.053,
|
||
|
|
"num_input_tokens_seen": 1188992,
|
||
|
|
"step": 1380
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.3535108958837774,
|
||
|
|
"grad_norm": 0.0019620037637650967,
|
||
|
|
"learning_rate": 1.4821287189965865e-06,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1193408,
|
||
|
|
"step": 1385
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.3656174334140436,
|
||
|
|
"grad_norm": 0.0014666810166090727,
|
||
|
|
"learning_rate": 1.4628608891733626e-06,
|
||
|
|
"loss": 0.0002,
|
||
|
|
"num_input_tokens_seen": 1197760,
|
||
|
|
"step": 1390
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.37772397094431,
|
||
|
|
"grad_norm": 0.0015612264396622777,
|
||
|
|
"learning_rate": 1.443667187431572e-06,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1201792,
|
||
|
|
"step": 1395
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.389830508474576,
|
||
|
|
"grad_norm": 0.03145941346883774,
|
||
|
|
"learning_rate": 1.4245489856144633e-06,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1205824,
|
||
|
|
"step": 1400
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.401937046004843,
|
||
|
|
"grad_norm": 0.0026610263157635927,
|
||
|
|
"learning_rate": 1.4055076501690313e-06,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1210240,
|
||
|
|
"step": 1405
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.414043583535109,
|
||
|
|
"grad_norm": 0.002824948402121663,
|
||
|
|
"learning_rate": 1.3865445420483524e-06,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1214464,
|
||
|
|
"step": 1410
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.4261501210653753,
|
||
|
|
"grad_norm": 3.925107002258301,
|
||
|
|
"learning_rate": 1.367661016614315e-06,
|
||
|
|
"loss": 0.0005,
|
||
|
|
"num_input_tokens_seen": 1218752,
|
||
|
|
"step": 1415
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.4382566585956416,
|
||
|
|
"grad_norm": 0.0029422210063785315,
|
||
|
|
"learning_rate": 1.348858423540744e-06,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1223168,
|
||
|
|
"step": 1420
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.450363196125908,
|
||
|
|
"grad_norm": 0.00631983857601881,
|
||
|
|
"learning_rate": 1.3301381067169367e-06,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1227328,
|
||
|
|
"step": 1425
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.4624697336561745,
|
||
|
|
"grad_norm": 0.0015673706075176597,
|
||
|
|
"learning_rate": 1.3115014041516088e-06,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1231360,
|
||
|
|
"step": 1430
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.4745762711864407,
|
||
|
|
"grad_norm": 0.001485335873439908,
|
||
|
|
"learning_rate": 1.2929496478772635e-06,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1235456,
|
||
|
|
"step": 1435
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.486682808716707,
|
||
|
|
"grad_norm": 0.001463928259909153,
|
||
|
|
"learning_rate": 1.2744841638549843e-06,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1239616,
|
||
|
|
"step": 1440
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.4987893462469732,
|
||
|
|
"grad_norm": 0.013075617142021656,
|
||
|
|
"learning_rate": 1.2561062718796663e-06,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1243968,
|
||
|
|
"step": 1445
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.5108958837772395,
|
||
|
|
"grad_norm": 0.00566933723166585,
|
||
|
|
"learning_rate": 1.2378172854856831e-06,
|
||
|
|
"loss": 0.0001,
|
||
|
|
"num_input_tokens_seen": 1248128,
|
||
|
|
"step": 1450
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.523002421307506,
|
||
|
|
"grad_norm": 0.004368732683360577,
|
||
|
|
"learning_rate": 1.2196185118530063e-06,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1252288,
|
||
|
|
"step": 1455
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.5254237288135593,
|
||
|
|
"eval_loss": 0.2580437958240509,
|
||
|
|
"eval_runtime": 0.6456,
|
||
|
|
"eval_samples_per_second": 568.425,
|
||
|
|
"eval_steps_per_second": 71.247,
|
||
|
|
"num_input_tokens_seen": 1253248,
|
||
|
|
"step": 1456
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.5351089588377724,
|
||
|
|
"grad_norm": 0.0046086618676781654,
|
||
|
|
"learning_rate": 1.2015112517137744e-06,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1256640,
|
||
|
|
"step": 1460
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.5472154963680387,
|
||
|
|
"grad_norm": 0.0021092540118843317,
|
||
|
|
"learning_rate": 1.183496799259326e-06,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1261440,
|
||
|
|
"step": 1465
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.559322033898305,
|
||
|
|
"grad_norm": 0.0011951872147619724,
|
||
|
|
"learning_rate": 1.165576442047699e-06,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1265664,
|
||
|
|
"step": 1470
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.571428571428571,
|
||
|
|
"grad_norm": 0.003473962191492319,
|
||
|
|
"learning_rate": 1.147751460911604e-06,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1270016,
|
||
|
|
"step": 1475
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.583535108958838,
|
||
|
|
"grad_norm": 0.0040196748450398445,
|
||
|
|
"learning_rate": 1.1300231298668786e-06,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1274560,
|
||
|
|
"step": 1480
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.595641646489104,
|
||
|
|
"grad_norm": 0.0007644465658813715,
|
||
|
|
"learning_rate": 1.112392716021429e-06,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1278976,
|
||
|
|
"step": 1485
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.6077481840193704,
|
||
|
|
"grad_norm": 0.00214450154453516,
|
||
|
|
"learning_rate": 1.0948614794846668e-06,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1283200,
|
||
|
|
"step": 1490
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.619854721549637,
|
||
|
|
"grad_norm": 0.004525753669440746,
|
||
|
|
"learning_rate": 1.0774306732774414e-06,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1287296,
|
||
|
|
"step": 1495
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.6319612590799033,
|
||
|
|
"grad_norm": 0.0008026693249121308,
|
||
|
|
"learning_rate": 1.0601015432424818e-06,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1291712,
|
||
|
|
"step": 1500
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.6440677966101696,
|
||
|
|
"grad_norm": 0.0020796298049390316,
|
||
|
|
"learning_rate": 1.0428753279553561e-06,
|
||
|
|
"loss": 0.0328,
|
||
|
|
"num_input_tokens_seen": 1295936,
|
||
|
|
"step": 1505
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.656174334140436,
|
||
|
|
"grad_norm": 16.17505645751953,
|
||
|
|
"learning_rate": 1.0257532586359422e-06,
|
||
|
|
"loss": 0.0527,
|
||
|
|
"num_input_tokens_seen": 1300608,
|
||
|
|
"step": 1510
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.668280871670702,
|
||
|
|
"grad_norm": 0.005263039376586676,
|
||
|
|
"learning_rate": 1.008736559060429e-06,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1305024,
|
||
|
|
"step": 1515
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.6803874092009687,
|
||
|
|
"grad_norm": 0.015520356595516205,
|
||
|
|
"learning_rate": 9.918264454738504e-07,
|
||
|
|
"loss": 0.0001,
|
||
|
|
"num_input_tokens_seen": 1309376,
|
||
|
|
"step": 1520
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.692493946731235,
|
||
|
|
"grad_norm": 0.01658783107995987,
|
||
|
|
"learning_rate": 9.750241265031529e-07,
|
||
|
|
"loss": 0.0001,
|
||
|
|
"num_input_tokens_seen": 1313664,
|
||
|
|
"step": 1525
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.7046004842615012,
|
||
|
|
"grad_norm": 0.0006883519235998392,
|
||
|
|
"learning_rate": 9.583308030708135e-07,
|
||
|
|
"loss": 0.0001,
|
||
|
|
"num_input_tokens_seen": 1318080,
|
||
|
|
"step": 1530
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.7167070217917675,
|
||
|
|
"grad_norm": 0.0012563606724143028,
|
||
|
|
"learning_rate": 9.417476683090007e-07,
|
||
|
|
"loss": 0.0001,
|
||
|
|
"num_input_tokens_seen": 1322432,
|
||
|
|
"step": 1535
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.7288135593220337,
|
||
|
|
"grad_norm": 0.0474010594189167,
|
||
|
|
"learning_rate": 9.252759074743034e-07,
|
||
|
|
"loss": 0.0003,
|
||
|
|
"num_input_tokens_seen": 1326848,
|
||
|
|
"step": 1540
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.7409200968523004,
|
||
|
|
"grad_norm": 0.012124676257371902,
|
||
|
|
"learning_rate": 9.08916697863014e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1331328,
|
||
|
|
"step": 1545
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.7530266343825667,
|
||
|
|
"grad_norm": 0.0076986453495919704,
|
||
|
|
"learning_rate": 8.926712087269801e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1335424,
|
||
|
|
"step": 1550
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.765133171912833,
|
||
|
|
"grad_norm": 0.004324712324887514,
|
||
|
|
"learning_rate": 8.765406011900368e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1339712,
|
||
|
|
"step": 1555
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.777239709443099,
|
||
|
|
"grad_norm": 0.007034031208604574,
|
||
|
|
"learning_rate": 8.605260281650152e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1344000,
|
||
|
|
"step": 1560
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.777239709443099,
|
||
|
|
"eval_loss": 0.2703007757663727,
|
||
|
|
"eval_runtime": 0.6408,
|
||
|
|
"eval_samples_per_second": 572.737,
|
||
|
|
"eval_steps_per_second": 71.787,
|
||
|
|
"num_input_tokens_seen": 1344000,
|
||
|
|
"step": 1560
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.7893462469733654,
|
||
|
|
"grad_norm": 0.00895662046968937,
|
||
|
|
"learning_rate": 8.44628634271342e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1348224,
|
||
|
|
"step": 1565
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.801452784503632,
|
||
|
|
"grad_norm": 0.002478554379194975,
|
||
|
|
"learning_rate": 8.288495557532241e-07,
|
||
|
|
"loss": 0.0017,
|
||
|
|
"num_input_tokens_seen": 1352576,
|
||
|
|
"step": 1570
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.8135593220338984,
|
||
|
|
"grad_norm": 11.973031997680664,
|
||
|
|
"learning_rate": 8.131899203984464e-07,
|
||
|
|
"loss": 0.0616,
|
||
|
|
"num_input_tokens_seen": 1356864,
|
||
|
|
"step": 1575
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.8256658595641646,
|
||
|
|
"grad_norm": 0.0017213321989402175,
|
||
|
|
"learning_rate": 7.976508474577549e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1361152,
|
||
|
|
"step": 1580
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.837772397094431,
|
||
|
|
"grad_norm": 0.0008722307975403965,
|
||
|
|
"learning_rate": 7.822334475648655e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1365376,
|
||
|
|
"step": 1585
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.849878934624697,
|
||
|
|
"grad_norm": 0.0017557705286890268,
|
||
|
|
"learning_rate": 7.66938822657081e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1369728,
|
||
|
|
"step": 1590
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.861985472154964,
|
||
|
|
"grad_norm": 0.0016538921045139432,
|
||
|
|
"learning_rate": 7.517680658965328e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1374144,
|
||
|
|
"step": 1595
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.87409200968523,
|
||
|
|
"grad_norm": 0.0021663156803697348,
|
||
|
|
"learning_rate": 7.367222615920477e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1378368,
|
||
|
|
"step": 1600
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.8861985472154963,
|
||
|
|
"grad_norm": 0.001977034378796816,
|
||
|
|
"learning_rate": 7.21802485121649e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1382464,
|
||
|
|
"step": 1605
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.898305084745763,
|
||
|
|
"grad_norm": 0.012750508263707161,
|
||
|
|
"learning_rate": 7.070098028556949e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1386880,
|
||
|
|
"step": 1610
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.910411622276029,
|
||
|
|
"grad_norm": 0.004628063179552555,
|
||
|
|
"learning_rate": 6.923452720806612e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1391296,
|
||
|
|
"step": 1615
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.9225181598062955,
|
||
|
|
"grad_norm": 0.0018564671045169234,
|
||
|
|
"learning_rate": 6.778099409235739e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1395456,
|
||
|
|
"step": 1620
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.9346246973365617,
|
||
|
|
"grad_norm": 0.0023303565103560686,
|
||
|
|
"learning_rate": 6.634048482770946e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1399616,
|
||
|
|
"step": 1625
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.946731234866828,
|
||
|
|
"grad_norm": 0.0011153841624036431,
|
||
|
|
"learning_rate": 6.491310237252679e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1403712,
|
||
|
|
"step": 1630
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.9588377723970947,
|
||
|
|
"grad_norm": 0.0012228480773046613,
|
||
|
|
"learning_rate": 6.349894874699345e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1408128,
|
||
|
|
"step": 1635
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.970944309927361,
|
||
|
|
"grad_norm": 0.0019976331386715174,
|
||
|
|
"learning_rate": 6.209812502578113e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1412480,
|
||
|
|
"step": 1640
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.983050847457627,
|
||
|
|
"grad_norm": 0.0029911224264651537,
|
||
|
|
"learning_rate": 6.071073133082492e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1416704,
|
||
|
|
"step": 1645
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 3.9951573849878934,
|
||
|
|
"grad_norm": 0.0036039084661751986,
|
||
|
|
"learning_rate": 5.933686682416759e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1421120,
|
||
|
|
"step": 1650
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 4.00726392251816,
|
||
|
|
"grad_norm": 0.00185173109639436,
|
||
|
|
"learning_rate": 5.797662970087184e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1424944,
|
||
|
|
"step": 1655
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 4.019370460048426,
|
||
|
|
"grad_norm": 0.002781663788482547,
|
||
|
|
"learning_rate": 5.663011718200201e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1429296,
|
||
|
|
"step": 1660
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 4.0290556900726395,
|
||
|
|
"eval_loss": 0.2501881718635559,
|
||
|
|
"eval_runtime": 0.6431,
|
||
|
|
"eval_samples_per_second": 570.66,
|
||
|
|
"eval_steps_per_second": 71.527,
|
||
|
|
"num_input_tokens_seen": 1432880,
|
||
|
|
"step": 1664
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 4.031476997578692,
|
||
|
|
"grad_norm": 0.0033902269788086414,
|
||
|
|
"learning_rate": 5.529742550767545e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1433776,
|
||
|
|
"step": 1665
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 4.043583535108959,
|
||
|
|
"grad_norm": 0.005406382493674755,
|
||
|
|
"learning_rate": 5.397864993018367e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1438000,
|
||
|
|
"step": 1670
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 4.0556900726392255,
|
||
|
|
"grad_norm": 0.0016813945258036256,
|
||
|
|
"learning_rate": 5.267388470718449e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1442352,
|
||
|
|
"step": 1675
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 4.067796610169491,
|
||
|
|
"grad_norm": 0.0014291841071099043,
|
||
|
|
"learning_rate": 5.138322309496504e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1446704,
|
||
|
|
"step": 1680
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 4.079903147699758,
|
||
|
|
"grad_norm": 0.0015732025494799018,
|
||
|
|
"learning_rate": 5.010675734177631e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1450864,
|
||
|
|
"step": 1685
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 4.092009685230024,
|
||
|
|
"grad_norm": 0.0017645714106038213,
|
||
|
|
"learning_rate": 4.884457868124001e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1455088,
|
||
|
|
"step": 1690
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 4.1041162227602905,
|
||
|
|
"grad_norm": 0.0008963189902715385,
|
||
|
|
"learning_rate": 4.759677732582782e-07,
|
||
|
|
"loss": 0.0051,
|
||
|
|
"num_input_tokens_seen": 1459376,
|
||
|
|
"step": 1695
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 4.116222760290557,
|
||
|
|
"grad_norm": 0.005383517127484083,
|
||
|
|
"learning_rate": 4.6363442460413215e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1463600,
|
||
|
|
"step": 1700
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 4.128329297820823,
|
||
|
|
"grad_norm": 0.001712340977974236,
|
||
|
|
"learning_rate": 4.514466223589753e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1468080,
|
||
|
|
"step": 1705
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 4.14043583535109,
|
||
|
|
"grad_norm": 0.0034400331787765026,
|
||
|
|
"learning_rate": 4.394052376290914e-07,
|
||
|
|
"loss": 0.0253,
|
||
|
|
"num_input_tokens_seen": 1472624,
|
||
|
|
"step": 1710
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 4.1525423728813555,
|
||
|
|
"grad_norm": 0.0054069641046226025,
|
||
|
|
"learning_rate": 4.2751113105577587e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1477040,
|
||
|
|
"step": 1715
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 4.164648910411622,
|
||
|
|
"grad_norm": 0.01111331395804882,
|
||
|
|
"learning_rate": 4.157651527538223e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1481328,
|
||
|
|
"step": 1720
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 4.176755447941889,
|
||
|
|
"grad_norm": 0.0013017026940360665,
|
||
|
|
"learning_rate": 4.041681422507604e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1485808,
|
||
|
|
"step": 1725
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 4.188861985472155,
|
||
|
|
"grad_norm": 0.002490431070327759,
|
||
|
|
"learning_rate": 3.927209284268535e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1490160,
|
||
|
|
"step": 1730
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 4.200968523002421,
|
||
|
|
"grad_norm": 0.008939598686993122,
|
||
|
|
"learning_rate": 3.8142432945585425e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1494512,
|
||
|
|
"step": 1735
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 4.213075060532688,
|
||
|
|
"grad_norm": 0.002455121139064431,
|
||
|
|
"learning_rate": 3.702791527465274e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1498480,
|
||
|
|
"step": 1740
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 4.225181598062954,
|
||
|
|
"grad_norm": 0.0009606365929357708,
|
||
|
|
"learning_rate": 3.592861948849416e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1502768,
|
||
|
|
"step": 1745
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 4.237288135593221,
|
||
|
|
"grad_norm": 0.0018587886588647962,
|
||
|
|
"learning_rate": 3.484462415775333e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1506992,
|
||
|
|
"step": 1750
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 4.249394673123486,
|
||
|
|
"grad_norm": 0.003451196476817131,
|
||
|
|
"learning_rate": 3.377600675949527e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1511472,
|
||
|
|
"step": 1755
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 4.261501210653753,
|
||
|
|
"grad_norm": 0.002900092862546444,
|
||
|
|
"learning_rate": 3.272284367166825e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1515824,
|
||
|
|
"step": 1760
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 4.27360774818402,
|
||
|
|
"grad_norm": 0.005786838009953499,
|
||
|
|
"learning_rate": 3.1685210167645336e-07,
|
||
|
|
"loss": 0.0001,
|
||
|
|
"num_input_tokens_seen": 1520176,
|
||
|
|
"step": 1765
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 4.280871670702179,
|
||
|
|
"eval_loss": 0.25040701031684875,
|
||
|
|
"eval_runtime": 2.3855,
|
||
|
|
"eval_samples_per_second": 153.845,
|
||
|
|
"eval_steps_per_second": 19.283,
|
||
|
|
"num_input_tokens_seen": 1522544,
|
||
|
|
"step": 1768
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 4.285714285714286,
|
||
|
|
"grad_norm": 0.0017267963849008083,
|
||
|
|
"learning_rate": 3.066318041084398e-07,
|
||
|
|
"loss": 0.0018,
|
||
|
|
"num_input_tokens_seen": 1524336,
|
||
|
|
"step": 1770
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 4.297820823244552,
|
||
|
|
"grad_norm": 0.011876060627400875,
|
||
|
|
"learning_rate": 2.9656827449425495e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1528560,
|
||
|
|
"step": 1775
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 4.309927360774818,
|
||
|
|
"grad_norm": 0.005988541524857283,
|
||
|
|
"learning_rate": 2.86662232110739e-07,
|
||
|
|
"loss": 0.026,
|
||
|
|
"num_input_tokens_seen": 1532720,
|
||
|
|
"step": 1780
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 4.322033898305085,
|
||
|
|
"grad_norm": 0.003772641299292445,
|
||
|
|
"learning_rate": 2.769143849785513e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1536944,
|
||
|
|
"step": 1785
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 4.3341404358353515,
|
||
|
|
"grad_norm": 0.0012762263650074601,
|
||
|
|
"learning_rate": 2.673254298115646e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1541168,
|
||
|
|
"step": 1790
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 4.346246973365617,
|
||
|
|
"grad_norm": 0.002060825005173683,
|
||
|
|
"learning_rate": 2.5789605196706675e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1545456,
|
||
|
|
"step": 1795
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 4.358353510895884,
|
||
|
|
"grad_norm": 0.002298081526532769,
|
||
|
|
"learning_rate": 2.4862692539677907e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1549872,
|
||
|
|
"step": 1800
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 4.37046004842615,
|
||
|
|
"grad_norm": 0.0016389107331633568,
|
||
|
|
"learning_rate": 2.39518712598685e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1554288,
|
||
|
|
"step": 1805
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 4.3825665859564165,
|
||
|
|
"grad_norm": 0.0016518625197932124,
|
||
|
|
"learning_rate": 2.3057206456967908e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1558384,
|
||
|
|
"step": 1810
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 4.394673123486683,
|
||
|
|
"grad_norm": 0.003522375365719199,
|
||
|
|
"learning_rate": 2.2178762075903747e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1562544,
|
||
|
|
"step": 1815
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 4.406779661016949,
|
||
|
|
"grad_norm": 0.001208233181387186,
|
||
|
|
"learning_rate": 2.131660090227139e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1567216,
|
||
|
|
"step": 1820
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 4.418886198547216,
|
||
|
|
"grad_norm": 0.0012071267701685429,
|
||
|
|
"learning_rate": 2.0470784557846652e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1571568,
|
||
|
|
"step": 1825
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 4.4309927360774815,
|
||
|
|
"grad_norm": 0.0016566209960728884,
|
||
|
|
"learning_rate": 1.9641373496181143e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1575792,
|
||
|
|
"step": 1830
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 4.443099273607748,
|
||
|
|
"grad_norm": 0.002269514137879014,
|
||
|
|
"learning_rate": 1.882842699828169e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1580080,
|
||
|
|
"step": 1835
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 4.455205811138015,
|
||
|
|
"grad_norm": 0.0023223496973514557,
|
||
|
|
"learning_rate": 1.8032003168373306e-07,
|
||
|
|
"loss": 0.0,
|
||
|
|
"num_input_tokens_seen": 1584112,
|
||
|
|
"step": 1840
|
||
|
|
},
|
||
|
|
{
|
||
|
|
"epoch": 4.467312348668281,
|
||
|
|
"grad_norm": 0.001878439332358539,
|
||
|
|
      "learning_rate": 1.7252158929746133e-07,
      "loss": 0.0,
      "num_input_tokens_seen": 1588400,
      "step": 1845
    },
    {
      "epoch": 4.479418886198547,
      "grad_norm": 0.009905189275741577,
      "learning_rate": 1.6488950020686956e-07,
      "loss": 0.0,
      "num_input_tokens_seen": 1592816,
      "step": 1850
    },
    {
      "epoch": 4.491525423728813,
      "grad_norm": 0.0024043757002800703,
      "learning_rate": 1.5742430990495465e-07,
      "loss": 0.0,
      "num_input_tokens_seen": 1597296,
      "step": 1855
    },
    {
      "epoch": 4.50363196125908,
      "grad_norm": 0.0012564613716676831,
      "learning_rate": 1.501265519558537e-07,
      "loss": 0.0184,
      "num_input_tokens_seen": 1601648,
      "step": 1860
    },
    {
      "epoch": 4.5157384987893465,
      "grad_norm": 0.014631015248596668,
      "learning_rate": 1.4299674795670765e-07,
      "loss": 0.0,
      "num_input_tokens_seen": 1605936,
      "step": 1865
    },
    {
      "epoch": 4.527845036319612,
      "grad_norm": 0.002841898240149021,
      "learning_rate": 1.360354075003828e-07,
      "loss": 0.0,
      "num_input_tokens_seen": 1610096,
      "step": 1870
    },
    {
      "epoch": 4.532687651331719,
      "eval_loss": 0.2488991767168045,
      "eval_runtime": 0.6331,
      "eval_samples_per_second": 579.694,
      "eval_steps_per_second": 72.659,
      "num_input_tokens_seen": 1611760,
      "step": 1872
    },
    {
      "epoch": 4.539951573849879,
      "grad_norm": 0.008398376405239105,
      "learning_rate": 1.2924302813904582e-07,
      "loss": 0.0,
      "num_input_tokens_seen": 1614384,
      "step": 1875
    },
    {
      "epoch": 4.552058111380145,
      "grad_norm": 0.0019534530583769083,
      "learning_rate": 1.2262009534860368e-07,
      "loss": 0.0,
      "num_input_tokens_seen": 1618800,
      "step": 1880
    },
    {
      "epoch": 4.5641646489104115,
      "grad_norm": 0.0013132602907717228,
      "learning_rate": 1.161670824940045e-07,
      "loss": 0.0,
      "num_input_tokens_seen": 1622960,
      "step": 1885
    },
    {
      "epoch": 4.576271186440678,
      "grad_norm": 0.0015446435427293181,
      "learning_rate": 1.0988445079540389e-07,
      "loss": 0.0,
      "num_input_tokens_seen": 1627056,
      "step": 1890
    },
    {
      "epoch": 4.588377723970944,
      "grad_norm": 0.0025299135595560074,
      "learning_rate": 1.0377264929520126e-07,
      "loss": 0.0002,
      "num_input_tokens_seen": 1631408,
      "step": 1895
    },
    {
      "epoch": 4.600484261501211,
      "grad_norm": 0.0017427592538297176,
      "learning_rate": 9.783211482594285e-08,
      "loss": 0.0,
      "num_input_tokens_seen": 1635888,
      "step": 1900
    },
    {
      "epoch": 4.6125907990314765,
      "grad_norm": 0.002218902576714754,
      "learning_rate": 9.206327197910203e-08,
      "loss": 0.0,
      "num_input_tokens_seen": 1640176,
      "step": 1905
    },
    {
      "epoch": 4.624697336561743,
      "grad_norm": 0.004275280050933361,
      "learning_rate": 8.64665330747308e-08,
      "loss": 0.0,
      "num_input_tokens_seen": 1644528,
      "step": 1910
    },
    {
      "epoch": 4.63680387409201,
      "grad_norm": 0.00501580024138093,
      "learning_rate": 8.104229813199111e-08,
      "loss": 0.0,
      "num_input_tokens_seen": 1649264,
      "step": 1915
    },
    {
      "epoch": 4.648910411622276,
      "grad_norm": 0.004407305270433426,
      "learning_rate": 7.579095484056193e-08,
      "loss": 0.0,
      "num_input_tokens_seen": 1653808,
      "step": 1920
    },
    {
      "epoch": 4.661016949152542,
      "grad_norm": 0.001284220372326672,
      "learning_rate": 7.071287853293141e-08,
      "loss": 0.0,
      "num_input_tokens_seen": 1658288,
      "step": 1925
    },
    {
      "epoch": 4.673123486682809,
      "grad_norm": 0.0011293090647086501,
      "learning_rate": 6.580843215757082e-08,
      "loss": 0.0,
      "num_input_tokens_seen": 1662576,
      "step": 1930
    },
    {
      "epoch": 4.685230024213075,
      "grad_norm": 0.007353964261710644,
      "learning_rate": 6.107796625299117e-08,
      "loss": 0.0,
      "num_input_tokens_seen": 1667056,
      "step": 1935
    },
    {
      "epoch": 4.697336561743342,
      "grad_norm": 0.0041248914785683155,
      "learning_rate": 5.652181892269182e-08,
      "loss": 0.0,
      "num_input_tokens_seen": 1671536,
      "step": 1940
    },
    {
      "epoch": 4.709443099273607,
      "grad_norm": 0.0036760459188371897,
      "learning_rate": 5.214031581099149e-08,
      "loss": 0.0,
      "num_input_tokens_seen": 1675888,
      "step": 1945
    },
    {
      "epoch": 4.721549636803874,
      "grad_norm": 0.0022840022575110197,
      "learning_rate": 4.793377007975719e-08,
      "loss": 0.0,
      "num_input_tokens_seen": 1680176,
      "step": 1950
    },
    {
      "epoch": 4.733656174334141,
      "grad_norm": 0.004901141859591007,
      "learning_rate": 4.3902482386018186e-08,
      "loss": 0.0,
      "num_input_tokens_seen": 1684400,
      "step": 1955
    },
    {
      "epoch": 4.745762711864407,
      "grad_norm": 0.004255094565451145,
      "learning_rate": 4.004674086047905e-08,
      "loss": 0.0357,
      "num_input_tokens_seen": 1688816,
      "step": 1960
    },
    {
      "epoch": 4.757869249394673,
      "grad_norm": 0.0026743041817098856,
      "learning_rate": 3.636682108692502e-08,
      "loss": 0.0,
      "num_input_tokens_seen": 1693360,
      "step": 1965
    },
    {
      "epoch": 4.76997578692494,
      "grad_norm": 0.0023194768000394106,
      "learning_rate": 3.286298608252442e-08,
      "loss": 0.0,
      "num_input_tokens_seen": 1697584,
      "step": 1970
    },
    {
      "epoch": 4.782082324455206,
      "grad_norm": 0.003945828415453434,
      "learning_rate": 2.953548627903202e-08,
      "loss": 0.0,
      "num_input_tokens_seen": 1702000,
      "step": 1975
    },
    {
      "epoch": 4.784503631961259,
      "eval_loss": 0.2507624924182892,
      "eval_runtime": 0.6499,
      "eval_samples_per_second": 564.699,
      "eval_steps_per_second": 70.78,
      "num_input_tokens_seen": 1702832,
      "step": 1976
    },
    {
      "epoch": 4.7941888619854724,
      "grad_norm": 0.0016053339932113886,
      "learning_rate": 2.6384559504886164e-08,
      "loss": 0.0,
      "num_input_tokens_seen": 1706416,
      "step": 1980
    },
    {
      "epoch": 4.806295399515738,
      "grad_norm": 0.0013666612794622779,
      "learning_rate": 2.3410430968214825e-08,
      "loss": 0.0,
      "num_input_tokens_seen": 1710960,
      "step": 1985
    },
    {
      "epoch": 4.818401937046005,
      "grad_norm": 0.0037024938501417637,
      "learning_rate": 2.0613313240735457e-08,
      "loss": 0.0,
      "num_input_tokens_seen": 1715440,
      "step": 1990
    },
    {
      "epoch": 4.830508474576272,
      "grad_norm": 0.0014027768047526479,
      "learning_rate": 1.7993406242563238e-08,
      "loss": 0.0,
      "num_input_tokens_seen": 1719728,
      "step": 1995
    },
    {
      "epoch": 4.842615012106537,
      "grad_norm": 0.0023848332930356264,
      "learning_rate": 1.5550897227922522e-08,
      "loss": 0.0,
      "num_input_tokens_seen": 1724272,
      "step": 2000
    },
    {
      "epoch": 4.854721549636804,
      "grad_norm": 0.001933931838721037,
      "learning_rate": 1.3285960771761696e-08,
      "loss": 0.0,
      "num_input_tokens_seen": 1728560,
      "step": 2005
    },
    {
      "epoch": 4.86682808716707,
      "grad_norm": 0.0011232432443648577,
      "learning_rate": 1.119875875727705e-08,
      "loss": 0.0,
      "num_input_tokens_seen": 1733104,
      "step": 2010
    },
    {
      "epoch": 4.878934624697337,
      "grad_norm": 0.0012837464455515146,
      "learning_rate": 9.289440364341484e-09,
      "loss": 0.0,
      "num_input_tokens_seen": 1737264,
      "step": 2015
    },
    {
      "epoch": 4.891041162227603,
      "grad_norm": 0.003382457885891199,
      "learning_rate": 7.558142058842755e-09,
      "loss": 0.0,
      "num_input_tokens_seen": 1741424,
      "step": 2020
    },
    {
      "epoch": 4.903147699757869,
      "grad_norm": 0.002113591879606247,
      "learning_rate": 6.004987582929056e-09,
      "loss": 0.0,
      "num_input_tokens_seen": 1745648,
      "step": 2025
    },
    {
      "epoch": 4.915254237288136,
      "grad_norm": 0.0015381674747914076,
      "learning_rate": 4.6300879461655404e-09,
      "loss": 0.0,
      "num_input_tokens_seen": 1749872,
      "step": 2030
    },
    {
      "epoch": 4.927360774818402,
      "grad_norm": 0.0024200750049203634,
      "learning_rate": 3.4335414175995506e-09,
      "loss": 0.0,
      "num_input_tokens_seen": 1754288,
      "step": 2035
    },
    {
      "epoch": 4.939467312348668,
      "grad_norm": 0.0022603883408010006,
      "learning_rate": 2.4154335187365207e-09,
      "loss": 0.0,
      "num_input_tokens_seen": 1758640,
      "step": 2040
    },
    {
      "epoch": 4.951573849878935,
      "grad_norm": 0.001337168039754033,
      "learning_rate": 1.575837017428472e-09,
      "loss": 0.0003,
      "num_input_tokens_seen": 1762928,
      "step": 2045
    },
    {
      "epoch": 4.963680387409201,
      "grad_norm": 0.002117044758051634,
      "learning_rate": 9.14811922672898e-10,
      "loss": 0.0,
      "num_input_tokens_seen": 1767344,
      "step": 2050
    },
    {
      "epoch": 4.9757869249394675,
      "grad_norm": 0.000865236681420356,
      "learning_rate": 4.3240548032230657e-10,
      "loss": 0.0,
      "num_input_tokens_seen": 1771632,
      "step": 2055
    },
    {
      "epoch": 4.987893462469733,
      "grad_norm": 0.001379348454065621,
      "learning_rate": 1.2865216970914253e-10,
      "loss": 0.0,
      "num_input_tokens_seen": 1775728,
      "step": 2060
    },
    {
      "epoch": 5.0,
      "grad_norm": 0.0014177365228533745,
      "learning_rate": 3.573701180537015e-12,
      "loss": 0.0,
      "num_input_tokens_seen": 1780000,
      "step": 2065
    },
    {
      "epoch": 5.0,
      "num_input_tokens_seen": 1780000,
      "step": 2065,
      "total_flos": 1.039320047616e+16,
      "train_loss": 0.06261659951716346,
      "train_runtime": 1141.6604,
      "train_samples_per_second": 14.457,
      "train_steps_per_second": 1.809
    }
  ],
  "logging_steps": 5,
  "max_steps": 2065,
  "num_input_tokens_seen": 1780000,
  "num_train_epochs": 5,
  "save_steps": 104,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.039320047616e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}