{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 10,
"global_step": 369,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.002711864406779661,
"grad_norm": 2.28125,
"learning_rate": 0.0,
"loss": 0.7736,
"step": 1
},
{
"epoch": 0.005423728813559322,
"grad_norm": 3.078125,
"learning_rate": 2.0000000000000003e-06,
"loss": 1.2456,
"step": 2
},
{
"epoch": 0.008135593220338983,
"grad_norm": 2.71875,
"learning_rate": 4.000000000000001e-06,
"loss": 1.0688,
"step": 3
},
{
"epoch": 0.010847457627118645,
"grad_norm": 2.53125,
"learning_rate": 6e-06,
"loss": 0.9478,
"step": 4
},
{
"epoch": 0.013559322033898305,
"grad_norm": 2.3125,
"learning_rate": 8.000000000000001e-06,
"loss": 0.8185,
"step": 5
},
{
"epoch": 0.016271186440677966,
"grad_norm": 2.921875,
"learning_rate": 1e-05,
"loss": 0.9357,
"step": 6
},
{
"epoch": 0.018983050847457626,
"grad_norm": 2.453125,
"learning_rate": 9.972527472527474e-06,
"loss": 0.9244,
"step": 7
},
{
"epoch": 0.02169491525423729,
"grad_norm": 1.7578125,
"learning_rate": 9.945054945054946e-06,
"loss": 0.6396,
"step": 8
},
{
"epoch": 0.02440677966101695,
"grad_norm": 1.421875,
"learning_rate": 9.917582417582419e-06,
"loss": 0.6322,
"step": 9
},
{
"epoch": 0.02711864406779661,
"grad_norm": 1.1953125,
"learning_rate": 9.890109890109892e-06,
"loss": 0.5287,
"step": 10
},
{
"epoch": 0.02711864406779661,
"eval_loss": 0.7306801080703735,
"eval_model_preparation_time": 0.0393,
"eval_runtime": 11.2959,
"eval_samples_per_second": 8.853,
"eval_steps_per_second": 4.426,
"step": 10
},
{
"epoch": 0.029830508474576273,
"grad_norm": 1.2578125,
"learning_rate": 9.862637362637364e-06,
"loss": 0.464,
"step": 11
},
{
"epoch": 0.03254237288135593,
"grad_norm": 1.109375,
"learning_rate": 9.835164835164835e-06,
"loss": 0.476,
"step": 12
},
{
"epoch": 0.03525423728813559,
"grad_norm": 1.1953125,
"learning_rate": 9.807692307692308e-06,
"loss": 0.7237,
"step": 13
},
{
"epoch": 0.03796610169491525,
"grad_norm": 1.1484375,
"learning_rate": 9.780219780219781e-06,
"loss": 0.5612,
"step": 14
},
{
"epoch": 0.04067796610169491,
"grad_norm": 1.046875,
"learning_rate": 9.752747252747253e-06,
"loss": 0.5471,
"step": 15
},
{
"epoch": 0.04338983050847458,
"grad_norm": 1.03125,
"learning_rate": 9.725274725274726e-06,
"loss": 0.5104,
"step": 16
},
{
"epoch": 0.04610169491525424,
"grad_norm": 0.90234375,
"learning_rate": 9.697802197802198e-06,
"loss": 0.4911,
"step": 17
},
{
"epoch": 0.0488135593220339,
"grad_norm": 0.82421875,
"learning_rate": 9.670329670329671e-06,
"loss": 0.3772,
"step": 18
},
{
"epoch": 0.05152542372881356,
"grad_norm": 0.96484375,
"learning_rate": 9.642857142857144e-06,
"loss": 0.495,
"step": 19
},
{
"epoch": 0.05423728813559322,
"grad_norm": 0.8359375,
"learning_rate": 9.615384615384616e-06,
"loss": 0.4638,
"step": 20
},
{
"epoch": 0.05423728813559322,
"eval_loss": 0.4768664240837097,
"eval_model_preparation_time": 0.0393,
"eval_runtime": 11.3113,
"eval_samples_per_second": 8.841,
"eval_steps_per_second": 4.42,
"step": 20
},
{
"epoch": 0.05694915254237288,
"grad_norm": 0.87890625,
"learning_rate": 9.587912087912089e-06,
"loss": 0.452,
"step": 21
},
{
"epoch": 0.059661016949152545,
"grad_norm": 0.79296875,
"learning_rate": 9.560439560439562e-06,
"loss": 0.4108,
"step": 22
},
{
"epoch": 0.062372881355932205,
"grad_norm": 0.73828125,
"learning_rate": 9.532967032967034e-06,
"loss": 0.3777,
"step": 23
},
{
"epoch": 0.06508474576271187,
"grad_norm": 0.8828125,
"learning_rate": 9.505494505494505e-06,
"loss": 0.577,
"step": 24
},
{
"epoch": 0.06779661016949153,
"grad_norm": 0.73046875,
"learning_rate": 9.478021978021978e-06,
"loss": 0.4745,
"step": 25
},
{
"epoch": 0.07050847457627119,
"grad_norm": 0.4609375,
"learning_rate": 9.450549450549452e-06,
"loss": 0.2241,
"step": 26
},
{
"epoch": 0.07322033898305084,
"grad_norm": 0.65625,
"learning_rate": 9.423076923076923e-06,
"loss": 0.4402,
"step": 27
},
{
"epoch": 0.0759322033898305,
"grad_norm": 0.578125,
"learning_rate": 9.395604395604396e-06,
"loss": 0.4457,
"step": 28
},
{
"epoch": 0.07864406779661016,
"grad_norm": 0.482421875,
"learning_rate": 9.36813186813187e-06,
"loss": 0.2633,
"step": 29
},
{
"epoch": 0.08135593220338982,
"grad_norm": 0.55859375,
"learning_rate": 9.340659340659341e-06,
"loss": 0.3224,
"step": 30
},
{
"epoch": 0.08135593220338982,
"eval_loss": 0.3509748876094818,
"eval_model_preparation_time": 0.0393,
"eval_runtime": 11.2986,
"eval_samples_per_second": 8.851,
"eval_steps_per_second": 4.425,
"step": 30
},
{
"epoch": 0.0840677966101695,
"grad_norm": 0.60546875,
"learning_rate": 9.313186813186814e-06,
"loss": 0.442,
"step": 31
},
{
"epoch": 0.08677966101694916,
"grad_norm": 0.515625,
"learning_rate": 9.285714285714288e-06,
"loss": 0.3376,
"step": 32
},
{
"epoch": 0.08949152542372882,
"grad_norm": 0.48828125,
"learning_rate": 9.258241758241759e-06,
"loss": 0.3107,
"step": 33
},
{
"epoch": 0.09220338983050848,
"grad_norm": 0.41015625,
"learning_rate": 9.230769230769232e-06,
"loss": 0.2595,
"step": 34
},
{
"epoch": 0.09491525423728814,
"grad_norm": 0.515625,
"learning_rate": 9.203296703296704e-06,
"loss": 0.3329,
"step": 35
},
{
"epoch": 0.0976271186440678,
"grad_norm": 0.4453125,
"learning_rate": 9.175824175824175e-06,
"loss": 0.3234,
"step": 36
},
{
"epoch": 0.10033898305084746,
"grad_norm": 0.466796875,
"learning_rate": 9.148351648351649e-06,
"loss": 0.3335,
"step": 37
},
{
"epoch": 0.10305084745762712,
"grad_norm": 0.31640625,
"learning_rate": 9.120879120879122e-06,
"loss": 0.1704,
"step": 38
},
{
"epoch": 0.10576271186440678,
"grad_norm": 0.34765625,
"learning_rate": 9.093406593406593e-06,
"loss": 0.2484,
"step": 39
},
{
"epoch": 0.10847457627118644,
"grad_norm": 0.53515625,
"learning_rate": 9.065934065934067e-06,
"loss": 0.4135,
"step": 40
},
{
"epoch": 0.10847457627118644,
"eval_loss": 0.30060264468193054,
"eval_model_preparation_time": 0.0393,
"eval_runtime": 11.2934,
"eval_samples_per_second": 8.855,
"eval_steps_per_second": 4.427,
"step": 40
},
{
"epoch": 0.1111864406779661,
"grad_norm": 0.333984375,
"learning_rate": 9.03846153846154e-06,
"loss": 0.2265,
"step": 41
},
{
"epoch": 0.11389830508474576,
"grad_norm": 0.421875,
"learning_rate": 9.010989010989011e-06,
"loss": 0.3255,
"step": 42
},
{
"epoch": 0.11661016949152542,
"grad_norm": 0.4453125,
"learning_rate": 8.983516483516484e-06,
"loss": 0.3604,
"step": 43
},
{
"epoch": 0.11932203389830509,
"grad_norm": 0.423828125,
"learning_rate": 8.956043956043958e-06,
"loss": 0.3244,
"step": 44
},
{
"epoch": 0.12203389830508475,
"grad_norm": 0.431640625,
"learning_rate": 8.92857142857143e-06,
"loss": 0.2405,
"step": 45
},
{
"epoch": 0.12474576271186441,
"grad_norm": 0.365234375,
"learning_rate": 8.9010989010989e-06,
"loss": 0.3178,
"step": 46
},
{
"epoch": 0.12745762711864406,
"grad_norm": 0.609375,
"learning_rate": 8.873626373626374e-06,
"loss": 0.5563,
"step": 47
},
{
"epoch": 0.13016949152542373,
"grad_norm": 0.337890625,
"learning_rate": 8.846153846153847e-06,
"loss": 0.1937,
"step": 48
},
{
"epoch": 0.13288135593220338,
"grad_norm": 0.455078125,
"learning_rate": 8.818681318681319e-06,
"loss": 0.3613,
"step": 49
},
{
"epoch": 0.13559322033898305,
"grad_norm": 0.326171875,
"learning_rate": 8.791208791208792e-06,
"loss": 0.1717,
"step": 50
},
{
"epoch": 0.13559322033898305,
"eval_loss": 0.27889174222946167,
"eval_model_preparation_time": 0.0393,
"eval_runtime": 11.2964,
"eval_samples_per_second": 8.852,
"eval_steps_per_second": 4.426,
"step": 50
},
{
"epoch": 0.13830508474576272,
"grad_norm": 0.296875,
"learning_rate": 8.763736263736265e-06,
"loss": 0.1388,
"step": 51
},
{
"epoch": 0.14101694915254237,
"grad_norm": 0.369140625,
"learning_rate": 8.736263736263737e-06,
"loss": 0.3083,
"step": 52
},
{
"epoch": 0.14372881355932204,
"grad_norm": 0.447265625,
"learning_rate": 8.70879120879121e-06,
"loss": 0.3756,
"step": 53
},
{
"epoch": 0.1464406779661017,
"grad_norm": 0.298828125,
"learning_rate": 8.681318681318681e-06,
"loss": 0.279,
"step": 54
},
{
"epoch": 0.14915254237288136,
"grad_norm": 0.28125,
"learning_rate": 8.653846153846155e-06,
"loss": 0.1603,
"step": 55
},
{
"epoch": 0.151864406779661,
"grad_norm": 0.333984375,
"learning_rate": 8.626373626373628e-06,
"loss": 0.2036,
"step": 56
},
{
"epoch": 0.15457627118644068,
"grad_norm": 0.380859375,
"learning_rate": 8.5989010989011e-06,
"loss": 0.304,
"step": 57
},
{
"epoch": 0.15728813559322033,
"grad_norm": 0.283203125,
"learning_rate": 8.571428571428571e-06,
"loss": 0.164,
"step": 58
},
{
"epoch": 0.16,
"grad_norm": 0.388671875,
"learning_rate": 8.543956043956044e-06,
"loss": 0.2686,
"step": 59
},
{
"epoch": 0.16271186440677965,
"grad_norm": 0.3359375,
"learning_rate": 8.516483516483517e-06,
"loss": 0.2263,
"step": 60
},
{
"epoch": 0.16271186440677965,
"eval_loss": 0.26370447874069214,
"eval_model_preparation_time": 0.0393,
"eval_runtime": 11.4114,
"eval_samples_per_second": 8.763,
"eval_steps_per_second": 4.382,
"step": 60
},
{
"epoch": 0.16542372881355932,
"grad_norm": 0.3828125,
"learning_rate": 8.489010989010989e-06,
"loss": 0.2605,
"step": 61
},
{
"epoch": 0.168135593220339,
"grad_norm": 0.3359375,
"learning_rate": 8.461538461538462e-06,
"loss": 0.2453,
"step": 62
},
{
"epoch": 0.17084745762711864,
"grad_norm": 0.29296875,
"learning_rate": 8.434065934065935e-06,
"loss": 0.1774,
"step": 63
},
{
"epoch": 0.17355932203389832,
"grad_norm": 0.314453125,
"learning_rate": 8.406593406593407e-06,
"loss": 0.2007,
"step": 64
},
{
"epoch": 0.17627118644067796,
"grad_norm": 0.255859375,
"learning_rate": 8.37912087912088e-06,
"loss": 0.1479,
"step": 65
},
{
"epoch": 0.17898305084745764,
"grad_norm": 0.3359375,
"learning_rate": 8.351648351648353e-06,
"loss": 0.1971,
"step": 66
},
{
"epoch": 0.18169491525423728,
"grad_norm": 0.25390625,
"learning_rate": 8.324175824175825e-06,
"loss": 0.1411,
"step": 67
},
{
"epoch": 0.18440677966101696,
"grad_norm": 0.3359375,
"learning_rate": 8.296703296703298e-06,
"loss": 0.2188,
"step": 68
},
{
"epoch": 0.1871186440677966,
"grad_norm": 0.287109375,
"learning_rate": 8.26923076923077e-06,
"loss": 0.157,
"step": 69
},
{
"epoch": 0.18983050847457628,
"grad_norm": 0.39453125,
"learning_rate": 8.241758241758243e-06,
"loss": 0.3359,
"step": 70
},
{
"epoch": 0.18983050847457628,
"eval_loss": 0.25340646505355835,
"eval_model_preparation_time": 0.0393,
"eval_runtime": 11.3213,
"eval_samples_per_second": 8.833,
"eval_steps_per_second": 4.416,
"step": 70
},
{
"epoch": 0.19254237288135592,
"grad_norm": 0.38671875,
"learning_rate": 8.214285714285714e-06,
"loss": 0.2741,
"step": 71
},
{
"epoch": 0.1952542372881356,
"grad_norm": 0.373046875,
"learning_rate": 8.186813186813188e-06,
"loss": 0.2986,
"step": 72
},
{
"epoch": 0.19796610169491524,
"grad_norm": 0.345703125,
"learning_rate": 8.15934065934066e-06,
"loss": 0.2458,
"step": 73
},
{
"epoch": 0.20067796610169492,
"grad_norm": 0.31640625,
"learning_rate": 8.131868131868132e-06,
"loss": 0.2598,
"step": 74
},
{
"epoch": 0.2033898305084746,
"grad_norm": 0.267578125,
"learning_rate": 8.104395604395605e-06,
"loss": 0.1463,
"step": 75
},
{
"epoch": 0.20610169491525424,
"grad_norm": 0.287109375,
"learning_rate": 8.076923076923077e-06,
"loss": 0.166,
"step": 76
},
{
"epoch": 0.2088135593220339,
"grad_norm": 0.4140625,
"learning_rate": 8.04945054945055e-06,
"loss": 0.2918,
"step": 77
},
{
"epoch": 0.21152542372881356,
"grad_norm": 0.296875,
"learning_rate": 8.021978021978023e-06,
"loss": 0.2058,
"step": 78
},
{
"epoch": 0.21423728813559323,
"grad_norm": 0.318359375,
"learning_rate": 7.994505494505495e-06,
"loss": 0.1957,
"step": 79
},
{
"epoch": 0.21694915254237288,
"grad_norm": 0.34765625,
"learning_rate": 7.967032967032966e-06,
"loss": 0.2418,
"step": 80
},
{
"epoch": 0.21694915254237288,
"eval_loss": 0.2449192851781845,
"eval_model_preparation_time": 0.0393,
"eval_runtime": 11.3151,
"eval_samples_per_second": 8.838,
"eval_steps_per_second": 4.419,
"step": 80
},
{
"epoch": 0.21966101694915255,
"grad_norm": 0.416015625,
"learning_rate": 7.93956043956044e-06,
"loss": 0.2543,
"step": 81
},
{
"epoch": 0.2223728813559322,
"grad_norm": 0.353515625,
"learning_rate": 7.912087912087913e-06,
"loss": 0.2527,
"step": 82
},
{
"epoch": 0.22508474576271187,
"grad_norm": 0.400390625,
"learning_rate": 7.884615384615384e-06,
"loss": 0.23,
"step": 83
},
{
"epoch": 0.22779661016949151,
"grad_norm": 0.447265625,
"learning_rate": 7.857142857142858e-06,
"loss": 0.2714,
"step": 84
},
{
"epoch": 0.2305084745762712,
"grad_norm": 0.3828125,
"learning_rate": 7.829670329670331e-06,
"loss": 0.2177,
"step": 85
},
{
"epoch": 0.23322033898305083,
"grad_norm": 0.3125,
"learning_rate": 7.802197802197802e-06,
"loss": 0.2209,
"step": 86
},
{
"epoch": 0.2359322033898305,
"grad_norm": 0.4140625,
"learning_rate": 7.774725274725276e-06,
"loss": 0.2322,
"step": 87
},
{
"epoch": 0.23864406779661018,
"grad_norm": 0.396484375,
"learning_rate": 7.747252747252749e-06,
"loss": 0.2553,
"step": 88
},
{
"epoch": 0.24135593220338983,
"grad_norm": 0.35546875,
"learning_rate": 7.71978021978022e-06,
"loss": 0.2081,
"step": 89
},
{
"epoch": 0.2440677966101695,
"grad_norm": 0.34375,
"learning_rate": 7.692307692307694e-06,
"loss": 0.159,
"step": 90
},
{
"epoch": 0.2440677966101695,
"eval_loss": 0.23829074203968048,
"eval_model_preparation_time": 0.0393,
"eval_runtime": 11.3009,
"eval_samples_per_second": 8.849,
"eval_steps_per_second": 4.424,
"step": 90
},
{
"epoch": 0.24677966101694915,
"grad_norm": 0.375,
"learning_rate": 7.664835164835167e-06,
"loss": 0.1883,
"step": 91
},
{
"epoch": 0.24949152542372882,
"grad_norm": 0.330078125,
"learning_rate": 7.637362637362638e-06,
"loss": 0.1843,
"step": 92
},
{
"epoch": 0.2522033898305085,
"grad_norm": 0.42578125,
"learning_rate": 7.60989010989011e-06,
"loss": 0.2159,
"step": 93
},
{
"epoch": 0.2549152542372881,
"grad_norm": 0.447265625,
"learning_rate": 7.582417582417583e-06,
"loss": 0.2595,
"step": 94
},
{
"epoch": 0.2576271186440678,
"grad_norm": 0.37109375,
"learning_rate": 7.5549450549450554e-06,
"loss": 0.2701,
"step": 95
},
{
"epoch": 0.26033898305084746,
"grad_norm": 0.40234375,
"learning_rate": 7.527472527472528e-06,
"loss": 0.2664,
"step": 96
},
{
"epoch": 0.26305084745762713,
"grad_norm": 0.404296875,
"learning_rate": 7.500000000000001e-06,
"loss": 0.2232,
"step": 97
},
{
"epoch": 0.26576271186440675,
"grad_norm": 0.380859375,
"learning_rate": 7.472527472527473e-06,
"loss": 0.2196,
"step": 98
},
{
"epoch": 0.2684745762711864,
"grad_norm": 0.380859375,
"learning_rate": 7.445054945054946e-06,
"loss": 0.2688,
"step": 99
},
{
"epoch": 0.2711864406779661,
"grad_norm": 0.283203125,
"learning_rate": 7.417582417582418e-06,
"loss": 0.1546,
"step": 100
},
{
"epoch": 0.2711864406779661,
"eval_loss": 0.2324570119380951,
"eval_model_preparation_time": 0.0393,
"eval_runtime": 11.3447,
"eval_samples_per_second": 8.815,
"eval_steps_per_second": 4.407,
"step": 100
},
{
"epoch": 0.2738983050847458,
"grad_norm": 0.423828125,
"learning_rate": 7.390109890109891e-06,
"loss": 0.2543,
"step": 101
},
{
"epoch": 0.27661016949152545,
"grad_norm": 0.451171875,
"learning_rate": 7.362637362637364e-06,
"loss": 0.3211,
"step": 102
},
{
"epoch": 0.27932203389830507,
"grad_norm": 0.36328125,
"learning_rate": 7.335164835164835e-06,
"loss": 0.2333,
"step": 103
},
{
"epoch": 0.28203389830508474,
"grad_norm": 0.38671875,
"learning_rate": 7.307692307692308e-06,
"loss": 0.2475,
"step": 104
},
{
"epoch": 0.2847457627118644,
"grad_norm": 0.412109375,
"learning_rate": 7.280219780219781e-06,
"loss": 0.3498,
"step": 105
},
{
"epoch": 0.2874576271186441,
"grad_norm": 0.376953125,
"learning_rate": 7.252747252747253e-06,
"loss": 0.2406,
"step": 106
},
{
"epoch": 0.2901694915254237,
"grad_norm": 0.390625,
"learning_rate": 7.225274725274726e-06,
"loss": 0.2182,
"step": 107
},
{
"epoch": 0.2928813559322034,
"grad_norm": 0.3203125,
"learning_rate": 7.197802197802198e-06,
"loss": 0.1852,
"step": 108
},
{
"epoch": 0.29559322033898305,
"grad_norm": 0.337890625,
"learning_rate": 7.170329670329671e-06,
"loss": 0.1863,
"step": 109
},
{
"epoch": 0.2983050847457627,
"grad_norm": 0.412109375,
"learning_rate": 7.1428571428571436e-06,
"loss": 0.26,
"step": 110
},
{
"epoch": 0.2983050847457627,
"eval_loss": 0.22855669260025024,
"eval_model_preparation_time": 0.0393,
"eval_runtime": 11.438,
"eval_samples_per_second": 8.743,
"eval_steps_per_second": 4.371,
"step": 110
},
{
"epoch": 0.30101694915254235,
"grad_norm": 0.361328125,
"learning_rate": 7.115384615384616e-06,
"loss": 0.1708,
"step": 111
},
{
"epoch": 0.303728813559322,
"grad_norm": 0.388671875,
"learning_rate": 7.087912087912089e-06,
"loss": 0.2213,
"step": 112
},
{
"epoch": 0.3064406779661017,
"grad_norm": 0.28515625,
"learning_rate": 7.0604395604395615e-06,
"loss": 0.135,
"step": 113
},
{
"epoch": 0.30915254237288137,
"grad_norm": 0.3046875,
"learning_rate": 7.032967032967034e-06,
"loss": 0.1405,
"step": 114
},
{
"epoch": 0.31186440677966104,
"grad_norm": 0.431640625,
"learning_rate": 7.005494505494505e-06,
"loss": 0.2475,
"step": 115
},
{
"epoch": 0.31457627118644066,
"grad_norm": 0.2333984375,
"learning_rate": 6.978021978021979e-06,
"loss": 0.0942,
"step": 116
},
{
"epoch": 0.31728813559322033,
"grad_norm": 0.318359375,
"learning_rate": 6.950549450549451e-06,
"loss": 0.1643,
"step": 117
},
{
"epoch": 0.32,
"grad_norm": 0.2255859375,
"learning_rate": 6.923076923076923e-06,
"loss": 0.0807,
"step": 118
},
{
"epoch": 0.3227118644067797,
"grad_norm": 0.462890625,
"learning_rate": 6.895604395604396e-06,
"loss": 0.2953,
"step": 119
},
{
"epoch": 0.3254237288135593,
"grad_norm": 0.31640625,
"learning_rate": 6.868131868131869e-06,
"loss": 0.1632,
"step": 120
},
{
"epoch": 0.3254237288135593,
"eval_loss": 0.22415581345558167,
"eval_model_preparation_time": 0.0393,
"eval_runtime": 11.5405,
"eval_samples_per_second": 8.665,
"eval_steps_per_second": 4.333,
"step": 120
},
{
"epoch": 0.328135593220339,
"grad_norm": 0.49609375,
"learning_rate": 6.840659340659341e-06,
"loss": 0.2407,
"step": 121
},
{
"epoch": 0.33084745762711865,
"grad_norm": 0.365234375,
"learning_rate": 6.813186813186814e-06,
"loss": 0.229,
"step": 122
},
{
"epoch": 0.3335593220338983,
"grad_norm": 0.361328125,
"learning_rate": 6.785714285714287e-06,
"loss": 0.2249,
"step": 123
},
{
"epoch": 0.336271186440678,
"grad_norm": 0.369140625,
"learning_rate": 6.758241758241759e-06,
"loss": 0.1877,
"step": 124
},
{
"epoch": 0.3389830508474576,
"grad_norm": 0.32421875,
"learning_rate": 6.730769230769232e-06,
"loss": 0.1355,
"step": 125
},
{
"epoch": 0.3416949152542373,
"grad_norm": 0.494140625,
"learning_rate": 6.703296703296703e-06,
"loss": 0.3479,
"step": 126
},
{
"epoch": 0.34440677966101696,
"grad_norm": 0.30859375,
"learning_rate": 6.6758241758241756e-06,
"loss": 0.2047,
"step": 127
},
{
"epoch": 0.34711864406779663,
"grad_norm": 0.36328125,
"learning_rate": 6.648351648351649e-06,
"loss": 0.1821,
"step": 128
},
{
"epoch": 0.34983050847457625,
"grad_norm": 0.3828125,
"learning_rate": 6.620879120879121e-06,
"loss": 0.232,
"step": 129
},
{
"epoch": 0.3525423728813559,
"grad_norm": 0.34765625,
"learning_rate": 6.5934065934065935e-06,
"loss": 0.2048,
"step": 130
},
{
"epoch": 0.3525423728813559,
"eval_loss": 0.2212015688419342,
"eval_model_preparation_time": 0.0393,
"eval_runtime": 11.7884,
"eval_samples_per_second": 8.483,
"eval_steps_per_second": 4.241,
"step": 130
},
{
"epoch": 0.3552542372881356,
"grad_norm": 0.408203125,
"learning_rate": 6.565934065934067e-06,
"loss": 0.2823,
"step": 131
},
{
"epoch": 0.3579661016949153,
"grad_norm": 0.291015625,
"learning_rate": 6.538461538461539e-06,
"loss": 0.1487,
"step": 132
},
{
"epoch": 0.3606779661016949,
"grad_norm": 0.46484375,
"learning_rate": 6.5109890109890115e-06,
"loss": 0.2647,
"step": 133
},
{
"epoch": 0.36338983050847457,
"grad_norm": 0.279296875,
"learning_rate": 6.483516483516485e-06,
"loss": 0.1578,
"step": 134
},
{
"epoch": 0.36610169491525424,
"grad_norm": 0.34765625,
"learning_rate": 6.456043956043957e-06,
"loss": 0.1861,
"step": 135
},
{
"epoch": 0.3688135593220339,
"grad_norm": 0.341796875,
"learning_rate": 6.4285714285714295e-06,
"loss": 0.2132,
"step": 136
},
{
"epoch": 0.3715254237288136,
"grad_norm": 0.384765625,
"learning_rate": 6.401098901098901e-06,
"loss": 0.2278,
"step": 137
},
{
"epoch": 0.3742372881355932,
"grad_norm": 0.302734375,
"learning_rate": 6.373626373626373e-06,
"loss": 0.1749,
"step": 138
},
{
"epoch": 0.3769491525423729,
"grad_norm": 0.40625,
"learning_rate": 6.3461538461538466e-06,
"loss": 0.2314,
"step": 139
},
{
"epoch": 0.37966101694915255,
"grad_norm": 0.384765625,
"learning_rate": 6.318681318681319e-06,
"loss": 0.2517,
"step": 140
},
{
"epoch": 0.37966101694915255,
"eval_loss": 0.21904973685741425,
"eval_model_preparation_time": 0.0393,
"eval_runtime": 11.3876,
"eval_samples_per_second": 8.781,
"eval_steps_per_second": 4.391,
"step": 140
},
{
"epoch": 0.3823728813559322,
"grad_norm": 0.3125,
"learning_rate": 6.291208791208791e-06,
"loss": 0.2115,
"step": 141
},
{
"epoch": 0.38508474576271184,
"grad_norm": 0.41796875,
"learning_rate": 6.2637362637362645e-06,
"loss": 0.1945,
"step": 142
},
{
"epoch": 0.3877966101694915,
"grad_norm": 0.392578125,
"learning_rate": 6.236263736263737e-06,
"loss": 0.239,
"step": 143
},
{
"epoch": 0.3905084745762712,
"grad_norm": 0.458984375,
"learning_rate": 6.208791208791209e-06,
"loss": 0.356,
"step": 144
},
{
"epoch": 0.39322033898305087,
"grad_norm": 0.388671875,
"learning_rate": 6.181318681318682e-06,
"loss": 0.2406,
"step": 145
},
{
"epoch": 0.3959322033898305,
"grad_norm": 0.28125,
"learning_rate": 6.153846153846155e-06,
"loss": 0.1312,
"step": 146
},
{
"epoch": 0.39864406779661016,
"grad_norm": 0.38671875,
"learning_rate": 6.126373626373627e-06,
"loss": 0.2204,
"step": 147
},
{
"epoch": 0.40135593220338983,
"grad_norm": 0.333984375,
"learning_rate": 6.0989010989011e-06,
"loss": 0.1769,
"step": 148
},
{
"epoch": 0.4040677966101695,
"grad_norm": 0.345703125,
"learning_rate": 6.071428571428571e-06,
"loss": 0.1817,
"step": 149
},
{
"epoch": 0.4067796610169492,
"grad_norm": 0.51953125,
"learning_rate": 6.043956043956044e-06,
"loss": 0.3207,
"step": 150
},
{
"epoch": 0.4067796610169492,
"eval_loss": 0.21635417640209198,
"eval_model_preparation_time": 0.0393,
"eval_runtime": 11.3802,
"eval_samples_per_second": 8.787,
"eval_steps_per_second": 4.394,
"step": 150
},
{
"epoch": 0.4094915254237288,
"grad_norm": 0.341796875,
"learning_rate": 6.016483516483517e-06,
"loss": 0.1799,
"step": 151
},
{
"epoch": 0.41220338983050847,
"grad_norm": 0.484375,
"learning_rate": 5.989010989010989e-06,
"loss": 0.2839,
"step": 152
},
{
"epoch": 0.41491525423728814,
"grad_norm": 0.255859375,
"learning_rate": 5.961538461538462e-06,
"loss": 0.1082,
"step": 153
},
{
"epoch": 0.4176271186440678,
"grad_norm": 0.369140625,
"learning_rate": 5.934065934065935e-06,
"loss": 0.2106,
"step": 154
},
{
"epoch": 0.42033898305084744,
"grad_norm": 0.3828125,
"learning_rate": 5.906593406593407e-06,
"loss": 0.2032,
"step": 155
},
{
"epoch": 0.4230508474576271,
"grad_norm": 0.400390625,
"learning_rate": 5.8791208791208794e-06,
"loss": 0.2513,
"step": 156
},
{
"epoch": 0.4257627118644068,
"grad_norm": 0.265625,
"learning_rate": 5.851648351648353e-06,
"loss": 0.1126,
"step": 157
},
{
"epoch": 0.42847457627118646,
"grad_norm": 0.408203125,
"learning_rate": 5.824175824175825e-06,
"loss": 0.2326,
"step": 158
},
{
"epoch": 0.4311864406779661,
"grad_norm": 0.341796875,
"learning_rate": 5.796703296703297e-06,
"loss": 0.161,
"step": 159
},
{
"epoch": 0.43389830508474575,
"grad_norm": 0.373046875,
"learning_rate": 5.769230769230769e-06,
"loss": 0.2494,
"step": 160
},
{
"epoch": 0.43389830508474575,
"eval_loss": 0.21449893712997437,
"eval_model_preparation_time": 0.0393,
"eval_runtime": 11.3764,
"eval_samples_per_second": 8.79,
"eval_steps_per_second": 4.395,
"step": 160
},
{
"epoch": 0.4366101694915254,
"grad_norm": 0.32421875,
"learning_rate": 5.741758241758242e-06,
"loss": 0.1967,
"step": 161
},
{
"epoch": 0.4393220338983051,
"grad_norm": 0.33984375,
"learning_rate": 5.7142857142857145e-06,
"loss": 0.172,
"step": 162
},
{
"epoch": 0.44203389830508477,
"grad_norm": 0.3203125,
"learning_rate": 5.686813186813187e-06,
"loss": 0.1679,
"step": 163
},
{
"epoch": 0.4447457627118644,
"grad_norm": 0.451171875,
"learning_rate": 5.65934065934066e-06,
"loss": 0.2689,
"step": 164
},
{
"epoch": 0.44745762711864406,
"grad_norm": 0.35546875,
"learning_rate": 5.6318681318681325e-06,
"loss": 0.1948,
"step": 165
},
{
"epoch": 0.45016949152542374,
"grad_norm": 0.46484375,
"learning_rate": 5.604395604395605e-06,
"loss": 0.2931,
"step": 166
},
{
"epoch": 0.4528813559322034,
"grad_norm": 0.408203125,
"learning_rate": 5.576923076923077e-06,
"loss": 0.1679,
"step": 167
},
{
"epoch": 0.45559322033898303,
"grad_norm": 0.384765625,
"learning_rate": 5.5494505494505504e-06,
"loss": 0.2063,
"step": 168
},
{
"epoch": 0.4583050847457627,
"grad_norm": 0.51171875,
"learning_rate": 5.521978021978023e-06,
"loss": 0.2895,
"step": 169
},
{
"epoch": 0.4610169491525424,
"grad_norm": 0.365234375,
"learning_rate": 5.494505494505495e-06,
"loss": 0.1781,
"step": 170
},
{
"epoch": 0.4610169491525424,
"eval_loss": 0.2126615196466446,
"eval_model_preparation_time": 0.0393,
"eval_runtime": 11.3672,
"eval_samples_per_second": 8.797,
"eval_steps_per_second": 4.399,
"step": 170
},
{
"epoch": 0.46372881355932205,
"grad_norm": 0.375,
"learning_rate": 5.467032967032967e-06,
"loss": 0.1829,
"step": 171
},
{
"epoch": 0.46644067796610167,
"grad_norm": 0.396484375,
"learning_rate": 5.43956043956044e-06,
"loss": 0.2247,
"step": 172
},
{
"epoch": 0.46915254237288134,
"grad_norm": 0.4140625,
"learning_rate": 5.412087912087912e-06,
"loss": 0.2509,
"step": 173
},
{
"epoch": 0.471864406779661,
"grad_norm": 0.455078125,
"learning_rate": 5.384615384615385e-06,
"loss": 0.248,
"step": 174
},
{
"epoch": 0.4745762711864407,
"grad_norm": 0.439453125,
"learning_rate": 5.357142857142857e-06,
"loss": 0.2222,
"step": 175
},
{
"epoch": 0.47728813559322036,
"grad_norm": 0.404296875,
"learning_rate": 5.32967032967033e-06,
"loss": 0.1882,
"step": 176
},
{
"epoch": 0.48,
"grad_norm": 0.3984375,
"learning_rate": 5.302197802197803e-06,
"loss": 0.1951,
"step": 177
},
{
"epoch": 0.48271186440677966,
"grad_norm": 0.43359375,
"learning_rate": 5.274725274725275e-06,
"loss": 0.1958,
"step": 178
},
{
"epoch": 0.48542372881355933,
"grad_norm": 0.359375,
"learning_rate": 5.247252747252748e-06,
"loss": 0.164,
"step": 179
},
{
"epoch": 0.488135593220339,
"grad_norm": 0.390625,
"learning_rate": 5.219780219780221e-06,
"loss": 0.2376,
"step": 180
},
{
"epoch": 0.488135593220339,
"eval_loss": 0.2105218917131424,
"eval_model_preparation_time": 0.0393,
"eval_runtime": 11.3696,
"eval_samples_per_second": 8.795,
"eval_steps_per_second": 4.398,
"step": 180
},
{
"epoch": 0.4908474576271186,
"grad_norm": 0.375,
"learning_rate": 5.192307692307693e-06,
"loss": 0.1714,
"step": 181
},
{
"epoch": 0.4935593220338983,
"grad_norm": 0.376953125,
"learning_rate": 5.164835164835166e-06,
"loss": 0.1925,
"step": 182
},
{
"epoch": 0.49627118644067797,
"grad_norm": 0.37890625,
"learning_rate": 5.137362637362638e-06,
"loss": 0.1919,
"step": 183
},
{
"epoch": 0.49898305084745764,
"grad_norm": 0.546875,
"learning_rate": 5.10989010989011e-06,
"loss": 0.2434,
"step": 184
},
{
"epoch": 0.5016949152542373,
"grad_norm": 0.314453125,
"learning_rate": 5.0824175824175824e-06,
"loss": 0.1381,
"step": 185
},
{
"epoch": 0.504406779661017,
"grad_norm": 0.482421875,
"learning_rate": 5.054945054945055e-06,
"loss": 0.2796,
"step": 186
},
{
"epoch": 0.5071186440677966,
"grad_norm": 0.40234375,
"learning_rate": 5.027472527472528e-06,
"loss": 0.1945,
"step": 187
},
{
"epoch": 0.5098305084745762,
"grad_norm": 0.431640625,
"learning_rate": 5e-06,
"loss": 0.2571,
"step": 188
},
{
"epoch": 0.512542372881356,
"grad_norm": 0.455078125,
"learning_rate": 4.972527472527473e-06,
"loss": 0.2749,
"step": 189
},
{
"epoch": 0.5152542372881356,
"grad_norm": 0.42578125,
"learning_rate": 4.945054945054946e-06,
"loss": 0.2185,
"step": 190
},
{
"epoch": 0.5152542372881356,
"eval_loss": 0.2096569836139679,
"eval_model_preparation_time": 0.0393,
"eval_runtime": 11.3456,
"eval_samples_per_second": 8.814,
"eval_steps_per_second": 4.407,
"step": 190
},
{
"epoch": 0.5179661016949153,
"grad_norm": 0.404296875,
"learning_rate": 4.9175824175824175e-06,
"loss": 0.2161,
"step": 191
},
{
"epoch": 0.5206779661016949,
"grad_norm": 0.421875,
"learning_rate": 4.890109890109891e-06,
"loss": 0.237,
"step": 192
},
{
"epoch": 0.5233898305084745,
"grad_norm": 0.40234375,
"learning_rate": 4.862637362637363e-06,
"loss": 0.2074,
"step": 193
},
{
"epoch": 0.5261016949152543,
"grad_norm": 0.373046875,
"learning_rate": 4.8351648351648355e-06,
"loss": 0.2588,
"step": 194
},
{
"epoch": 0.5288135593220339,
"grad_norm": 0.451171875,
"learning_rate": 4.807692307692308e-06,
"loss": 0.2358,
"step": 195
},
{
"epoch": 0.5315254237288135,
"grad_norm": 0.3046875,
"learning_rate": 4.780219780219781e-06,
"loss": 0.1249,
"step": 196
},
{
"epoch": 0.5342372881355932,
"grad_norm": 0.41015625,
"learning_rate": 4.752747252747253e-06,
"loss": 0.1959,
"step": 197
},
{
"epoch": 0.5369491525423729,
"grad_norm": 0.2451171875,
"learning_rate": 4.725274725274726e-06,
"loss": 0.1217,
"step": 198
},
{
"epoch": 0.5396610169491526,
"grad_norm": 0.310546875,
"learning_rate": 4.697802197802198e-06,
"loss": 0.1453,
"step": 199
},
{
"epoch": 0.5423728813559322,
"grad_norm": 0.416015625,
"learning_rate": 4.6703296703296706e-06,
"loss": 0.2259,
"step": 200
},
{
"epoch": 0.5423728813559322,
"eval_loss": 0.20834746956825256,
"eval_model_preparation_time": 0.0393,
"eval_runtime": 11.3813,
"eval_samples_per_second": 8.786,
"eval_steps_per_second": 4.393,
"step": 200
},
{
"epoch": 0.5450847457627118,
"grad_norm": 0.408203125,
"learning_rate": 4.642857142857144e-06,
"loss": 0.1652,
"step": 201
},
{
"epoch": 0.5477966101694915,
"grad_norm": 0.392578125,
"learning_rate": 4.615384615384616e-06,
"loss": 0.2135,
"step": 202
},
{
"epoch": 0.5505084745762712,
"grad_norm": 0.365234375,
"learning_rate": 4.587912087912088e-06,
"loss": 0.2018,
"step": 203
},
{
"epoch": 0.5532203389830509,
"grad_norm": 0.330078125,
"learning_rate": 4.560439560439561e-06,
"loss": 0.1461,
"step": 204
},
{
"epoch": 0.5559322033898305,
"grad_norm": 0.4609375,
"learning_rate": 4.532967032967033e-06,
"loss": 0.2551,
"step": 205
},
{
"epoch": 0.5586440677966101,
"grad_norm": 0.423828125,
"learning_rate": 4.505494505494506e-06,
"loss": 0.1913,
"step": 206
},
{
"epoch": 0.5613559322033899,
"grad_norm": 0.4375,
"learning_rate": 4.478021978021979e-06,
"loss": 0.2274,
"step": 207
},
{
"epoch": 0.5640677966101695,
"grad_norm": 0.380859375,
"learning_rate": 4.45054945054945e-06,
"loss": 0.1962,
"step": 208
},
{
"epoch": 0.5667796610169491,
"grad_norm": 0.494140625,
"learning_rate": 4.423076923076924e-06,
"loss": 0.2946,
"step": 209
},
{
"epoch": 0.5694915254237288,
"grad_norm": 0.4609375,
"learning_rate": 4.395604395604396e-06,
"loss": 0.2301,
"step": 210
},
{
"epoch": 0.5694915254237288,
"eval_loss": 0.20648249983787537,
"eval_model_preparation_time": 0.0393,
"eval_runtime": 11.3454,
"eval_samples_per_second": 8.814,
"eval_steps_per_second": 4.407,
"step": 210
},
{
"epoch": 0.5722033898305084,
"grad_norm": 0.39453125,
"learning_rate": 4.368131868131868e-06,
"loss": 0.2761,
"step": 211
},
{
"epoch": 0.5749152542372882,
"grad_norm": 0.306640625,
"learning_rate": 4.340659340659341e-06,
"loss": 0.1019,
"step": 212
},
{
"epoch": 0.5776271186440678,
"grad_norm": 0.51953125,
"learning_rate": 4.313186813186814e-06,
"loss": 0.3189,
"step": 213
},
{
"epoch": 0.5803389830508474,
"grad_norm": 0.40625,
"learning_rate": 4.2857142857142855e-06,
"loss": 0.222,
"step": 214
},
{
"epoch": 0.5830508474576271,
"grad_norm": 0.45703125,
"learning_rate": 4.258241758241759e-06,
"loss": 0.271,
"step": 215
},
{
"epoch": 0.5857627118644068,
"grad_norm": 0.421875,
"learning_rate": 4.230769230769231e-06,
"loss": 0.2659,
"step": 216
},
{
"epoch": 0.5884745762711865,
"grad_norm": 0.3125,
"learning_rate": 4.203296703296703e-06,
"loss": 0.1449,
"step": 217
},
{
"epoch": 0.5911864406779661,
"grad_norm": 0.482421875,
"learning_rate": 4.175824175824177e-06,
"loss": 0.3675,
"step": 218
},
{
"epoch": 0.5938983050847457,
"grad_norm": 0.423828125,
"learning_rate": 4.148351648351649e-06,
"loss": 0.2713,
"step": 219
},
{
"epoch": 0.5966101694915255,
"grad_norm": 0.357421875,
"learning_rate": 4.120879120879121e-06,
"loss": 0.1717,
"step": 220
},
{
"epoch": 0.5966101694915255,
"eval_loss": 0.20527859032154083,
"eval_model_preparation_time": 0.0393,
"eval_runtime": 11.3389,
"eval_samples_per_second": 8.819,
"eval_steps_per_second": 4.41,
"step": 220
},
{
"epoch": 0.5993220338983051,
"grad_norm": 0.380859375,
"learning_rate": 4.093406593406594e-06,
"loss": 0.2136,
"step": 221
},
{
"epoch": 0.6020338983050847,
"grad_norm": 0.259765625,
"learning_rate": 4.065934065934066e-06,
"loss": 0.1038,
"step": 222
},
{
"epoch": 0.6047457627118644,
"grad_norm": 0.345703125,
"learning_rate": 4.0384615384615385e-06,
"loss": 0.1795,
"step": 223
},
{
"epoch": 0.607457627118644,
"grad_norm": 0.359375,
"learning_rate": 4.010989010989012e-06,
"loss": 0.2246,
"step": 224
},
{
"epoch": 0.6101694915254238,
"grad_norm": 0.380859375,
"learning_rate": 3.983516483516483e-06,
"loss": 0.2252,
"step": 225
},
{
"epoch": 0.6128813559322034,
"grad_norm": 0.396484375,
"learning_rate": 3.9560439560439565e-06,
"loss": 0.2359,
"step": 226
},
{
"epoch": 0.615593220338983,
"grad_norm": 0.3359375,
"learning_rate": 3.928571428571429e-06,
"loss": 0.1369,
"step": 227
},
{
"epoch": 0.6183050847457627,
"grad_norm": 0.43359375,
"learning_rate": 3.901098901098901e-06,
"loss": 0.2164,
"step": 228
},
{
"epoch": 0.6210169491525424,
"grad_norm": 0.37890625,
"learning_rate": 3.873626373626374e-06,
"loss": 0.1927,
"step": 229
},
{
"epoch": 0.6237288135593221,
"grad_norm": 0.341796875,
"learning_rate": 3.846153846153847e-06,
"loss": 0.1609,
"step": 230
},
{
"epoch": 0.6237288135593221,
"eval_loss": 0.20454387366771698,
"eval_model_preparation_time": 0.0393,
"eval_runtime": 11.4641,
"eval_samples_per_second": 8.723,
"eval_steps_per_second": 4.361,
"step": 230
},
{
"epoch": 0.6264406779661017,
"grad_norm": 0.41796875,
"learning_rate": 3.818681318681319e-06,
"loss": 0.2662,
"step": 231
},
{
"epoch": 0.6291525423728813,
"grad_norm": 0.306640625,
"learning_rate": 3.7912087912087915e-06,
"loss": 0.1026,
"step": 232
},
{
"epoch": 0.631864406779661,
"grad_norm": 0.30859375,
"learning_rate": 3.763736263736264e-06,
"loss": 0.1002,
"step": 233
},
{
"epoch": 0.6345762711864407,
"grad_norm": 0.408203125,
"learning_rate": 3.7362637362637367e-06,
"loss": 0.1949,
"step": 234
},
{
"epoch": 0.6372881355932203,
"grad_norm": 0.380859375,
"learning_rate": 3.708791208791209e-06,
"loss": 0.2105,
"step": 235
},
{
"epoch": 0.64,
"grad_norm": 0.431640625,
"learning_rate": 3.681318681318682e-06,
"loss": 0.24,
"step": 236
},
{
"epoch": 0.6427118644067796,
"grad_norm": 0.48046875,
"learning_rate": 3.653846153846154e-06,
"loss": 0.2764,
"step": 237
},
{
"epoch": 0.6454237288135594,
"grad_norm": 0.33203125,
"learning_rate": 3.6263736263736266e-06,
"loss": 0.1852,
"step": 238
},
{
"epoch": 0.648135593220339,
"grad_norm": 0.3125,
"learning_rate": 3.598901098901099e-06,
"loss": 0.1494,
"step": 239
},
{
"epoch": 0.6508474576271186,
"grad_norm": 0.328125,
"learning_rate": 3.5714285714285718e-06,
"loss": 0.1097,
"step": 240
},
{
"epoch": 0.6508474576271186,
"eval_loss": 0.2033514529466629,
"eval_model_preparation_time": 0.0393,
"eval_runtime": 11.5749,
"eval_samples_per_second": 8.639,
"eval_steps_per_second": 4.32,
"step": 240
},
{
"epoch": 0.6535593220338983,
"grad_norm": 0.349609375,
"learning_rate": 3.5439560439560446e-06,
"loss": 0.1553,
"step": 241
},
{
"epoch": 0.656271186440678,
"grad_norm": 0.310546875,
"learning_rate": 3.516483516483517e-06,
"loss": 0.181,
"step": 242
},
{
"epoch": 0.6589830508474577,
"grad_norm": 0.384765625,
"learning_rate": 3.4890109890109893e-06,
"loss": 0.171,
"step": 243
},
{
"epoch": 0.6616949152542373,
"grad_norm": 0.357421875,
"learning_rate": 3.4615384615384617e-06,
"loss": 0.1853,
"step": 244
},
{
"epoch": 0.6644067796610169,
"grad_norm": 0.53125,
"learning_rate": 3.4340659340659345e-06,
"loss": 0.2538,
"step": 245
},
{
"epoch": 0.6671186440677966,
"grad_norm": 0.3359375,
"learning_rate": 3.406593406593407e-06,
"loss": 0.1439,
"step": 246
},
{
"epoch": 0.6698305084745763,
"grad_norm": 0.3515625,
"learning_rate": 3.3791208791208797e-06,
"loss": 0.1826,
"step": 247
},
{
"epoch": 0.672542372881356,
"grad_norm": 0.3828125,
"learning_rate": 3.3516483516483516e-06,
"loss": 0.2208,
"step": 248
},
{
"epoch": 0.6752542372881356,
"grad_norm": 0.365234375,
"learning_rate": 3.3241758241758244e-06,
"loss": 0.1267,
"step": 249
},
{
"epoch": 0.6779661016949152,
"grad_norm": 0.345703125,
"learning_rate": 3.2967032967032968e-06,
"loss": 0.1361,
"step": 250
},
{
"epoch": 0.6779661016949152,
"eval_loss": 0.20349906384944916,
"eval_model_preparation_time": 0.0393,
"eval_runtime": 11.5599,
"eval_samples_per_second": 8.651,
"eval_steps_per_second": 4.325,
"step": 250
},
{
"epoch": 0.680677966101695,
"grad_norm": 0.478515625,
"learning_rate": 3.2692307692307696e-06,
"loss": 0.3215,
"step": 251
},
{
"epoch": 0.6833898305084746,
"grad_norm": 0.455078125,
"learning_rate": 3.2417582417582424e-06,
"loss": 0.2283,
"step": 252
},
{
"epoch": 0.6861016949152542,
"grad_norm": 0.404296875,
"learning_rate": 3.2142857142857147e-06,
"loss": 0.179,
"step": 253
},
{
"epoch": 0.6888135593220339,
"grad_norm": 0.33203125,
"learning_rate": 3.1868131868131867e-06,
"loss": 0.1262,
"step": 254
},
{
"epoch": 0.6915254237288135,
"grad_norm": 0.34375,
"learning_rate": 3.1593406593406595e-06,
"loss": 0.1797,
"step": 255
},
{
"epoch": 0.6942372881355933,
"grad_norm": 0.453125,
"learning_rate": 3.1318681318681323e-06,
"loss": 0.2528,
"step": 256
},
{
"epoch": 0.6969491525423729,
"grad_norm": 0.404296875,
"learning_rate": 3.1043956043956046e-06,
"loss": 0.1432,
"step": 257
},
{
"epoch": 0.6996610169491525,
"grad_norm": 0.35546875,
"learning_rate": 3.0769230769230774e-06,
"loss": 0.1417,
"step": 258
},
{
"epoch": 0.7023728813559322,
"grad_norm": 0.443359375,
"learning_rate": 3.04945054945055e-06,
"loss": 0.2974,
"step": 259
},
{
"epoch": 0.7050847457627119,
"grad_norm": 0.361328125,
"learning_rate": 3.021978021978022e-06,
"loss": 0.2162,
"step": 260
},
{
"epoch": 0.7050847457627119,
"eval_loss": 0.20323127508163452,
"eval_model_preparation_time": 0.0393,
"eval_runtime": 11.5157,
"eval_samples_per_second": 8.684,
"eval_steps_per_second": 4.342,
"step": 260
},
{
"epoch": 0.7077966101694916,
"grad_norm": 0.412109375,
"learning_rate": 2.9945054945054946e-06,
"loss": 0.1693,
"step": 261
},
{
"epoch": 0.7105084745762712,
"grad_norm": 0.3125,
"learning_rate": 2.9670329670329673e-06,
"loss": 0.1286,
"step": 262
},
{
"epoch": 0.7132203389830508,
"grad_norm": 0.380859375,
"learning_rate": 2.9395604395604397e-06,
"loss": 0.1939,
"step": 263
},
{
"epoch": 0.7159322033898305,
"grad_norm": 0.287109375,
"learning_rate": 2.9120879120879125e-06,
"loss": 0.1059,
"step": 264
},
{
"epoch": 0.7186440677966102,
"grad_norm": 0.4296875,
"learning_rate": 2.8846153846153845e-06,
"loss": 0.1912,
"step": 265
},
{
"epoch": 0.7213559322033898,
"grad_norm": 0.318359375,
"learning_rate": 2.8571428571428573e-06,
"loss": 0.1356,
"step": 266
},
{
"epoch": 0.7240677966101695,
"grad_norm": 0.333984375,
"learning_rate": 2.82967032967033e-06,
"loss": 0.1228,
"step": 267
},
{
"epoch": 0.7267796610169491,
"grad_norm": 0.37109375,
"learning_rate": 2.8021978021978024e-06,
"loss": 0.2007,
"step": 268
},
{
"epoch": 0.7294915254237289,
"grad_norm": 0.37890625,
"learning_rate": 2.7747252747252752e-06,
"loss": 0.2002,
"step": 269
},
{
"epoch": 0.7322033898305085,
"grad_norm": 0.3984375,
"learning_rate": 2.7472527472527476e-06,
"loss": 0.1859,
"step": 270
},
{
"epoch": 0.7322033898305085,
"eval_loss": 0.20238856971263885,
"eval_model_preparation_time": 0.0393,
"eval_runtime": 11.5157,
"eval_samples_per_second": 8.684,
"eval_steps_per_second": 4.342,
"step": 270
},
{
"epoch": 0.7349152542372881,
"grad_norm": 0.359375,
"learning_rate": 2.71978021978022e-06,
"loss": 0.1753,
"step": 271
},
{
"epoch": 0.7376271186440678,
"grad_norm": 0.484375,
"learning_rate": 2.6923076923076923e-06,
"loss": 0.1885,
"step": 272
},
{
"epoch": 0.7403389830508474,
"grad_norm": 0.416015625,
"learning_rate": 2.664835164835165e-06,
"loss": 0.2015,
"step": 273
},
{
"epoch": 0.7430508474576272,
"grad_norm": 0.3359375,
"learning_rate": 2.6373626373626375e-06,
"loss": 0.1205,
"step": 274
},
{
"epoch": 0.7457627118644068,
"grad_norm": 0.404296875,
"learning_rate": 2.6098901098901103e-06,
"loss": 0.1731,
"step": 275
},
{
"epoch": 0.7484745762711864,
"grad_norm": 0.4609375,
"learning_rate": 2.582417582417583e-06,
"loss": 0.2409,
"step": 276
},
{
"epoch": 0.7511864406779661,
"grad_norm": 0.3828125,
"learning_rate": 2.554945054945055e-06,
"loss": 0.2074,
"step": 277
},
{
"epoch": 0.7538983050847458,
"grad_norm": 0.51953125,
"learning_rate": 2.5274725274725274e-06,
"loss": 0.3189,
"step": 278
},
{
"epoch": 0.7566101694915254,
"grad_norm": 0.322265625,
"learning_rate": 2.5e-06,
"loss": 0.1501,
"step": 279
},
{
"epoch": 0.7593220338983051,
"grad_norm": 0.396484375,
"learning_rate": 2.472527472527473e-06,
"loss": 0.209,
"step": 280
},
{
"epoch": 0.7593220338983051,
"eval_loss": 0.2017662674188614,
"eval_model_preparation_time": 0.0393,
"eval_runtime": 11.5035,
"eval_samples_per_second": 8.693,
"eval_steps_per_second": 4.347,
"step": 280
},
{
"epoch": 0.7620338983050847,
"grad_norm": 0.375,
"learning_rate": 2.4450549450549454e-06,
"loss": 0.178,
"step": 281
},
{
"epoch": 0.7647457627118645,
"grad_norm": 0.408203125,
"learning_rate": 2.4175824175824177e-06,
"loss": 0.2037,
"step": 282
},
{
"epoch": 0.7674576271186441,
"grad_norm": 0.455078125,
"learning_rate": 2.3901098901098905e-06,
"loss": 0.2565,
"step": 283
},
{
"epoch": 0.7701694915254237,
"grad_norm": 0.451171875,
"learning_rate": 2.362637362637363e-06,
"loss": 0.2449,
"step": 284
},
{
"epoch": 0.7728813559322034,
"grad_norm": 0.486328125,
"learning_rate": 2.3351648351648353e-06,
"loss": 0.2613,
"step": 285
},
{
"epoch": 0.775593220338983,
"grad_norm": 0.38671875,
"learning_rate": 2.307692307692308e-06,
"loss": 0.2347,
"step": 286
},
{
"epoch": 0.7783050847457628,
"grad_norm": 0.4140625,
"learning_rate": 2.2802197802197804e-06,
"loss": 0.1807,
"step": 287
},
{
"epoch": 0.7810169491525424,
"grad_norm": 0.388671875,
"learning_rate": 2.252747252747253e-06,
"loss": 0.2093,
"step": 288
},
{
"epoch": 0.783728813559322,
"grad_norm": 0.38671875,
"learning_rate": 2.225274725274725e-06,
"loss": 0.1885,
"step": 289
},
{
"epoch": 0.7864406779661017,
"grad_norm": 0.296875,
"learning_rate": 2.197802197802198e-06,
"loss": 0.1163,
"step": 290
},
{
"epoch": 0.7864406779661017,
"eval_loss": 0.2012355625629425,
"eval_model_preparation_time": 0.0393,
"eval_runtime": 11.5261,
"eval_samples_per_second": 8.676,
"eval_steps_per_second": 4.338,
"step": 290
},
{
"epoch": 0.7891525423728813,
"grad_norm": 0.484375,
"learning_rate": 2.1703296703296704e-06,
"loss": 0.2859,
"step": 291
},
{
"epoch": 0.791864406779661,
"grad_norm": 0.37890625,
"learning_rate": 2.1428571428571427e-06,
"loss": 0.186,
"step": 292
},
{
"epoch": 0.7945762711864407,
"grad_norm": 0.36328125,
"learning_rate": 2.1153846153846155e-06,
"loss": 0.1688,
"step": 293
},
{
"epoch": 0.7972881355932203,
"grad_norm": 0.291015625,
"learning_rate": 2.0879120879120883e-06,
"loss": 0.1007,
"step": 294
},
{
"epoch": 0.8,
"grad_norm": 0.53515625,
"learning_rate": 2.0604395604395607e-06,
"loss": 0.3142,
"step": 295
},
{
"epoch": 0.8027118644067797,
"grad_norm": 0.439453125,
"learning_rate": 2.032967032967033e-06,
"loss": 0.2239,
"step": 296
},
{
"epoch": 0.8054237288135593,
"grad_norm": 0.400390625,
"learning_rate": 2.005494505494506e-06,
"loss": 0.1749,
"step": 297
},
{
"epoch": 0.808135593220339,
"grad_norm": 0.439453125,
"learning_rate": 1.9780219780219782e-06,
"loss": 0.2692,
"step": 298
},
{
"epoch": 0.8108474576271186,
"grad_norm": 0.466796875,
"learning_rate": 1.9505494505494506e-06,
"loss": 0.3106,
"step": 299
},
{
"epoch": 0.8135593220338984,
"grad_norm": 0.359375,
"learning_rate": 1.9230769230769234e-06,
"loss": 0.1723,
"step": 300
},
{
"epoch": 0.8135593220338984,
"eval_loss": 0.20140354335308075,
"eval_model_preparation_time": 0.0393,
"eval_runtime": 11.5168,
"eval_samples_per_second": 8.683,
"eval_steps_per_second": 4.342,
"step": 300
},
{
"epoch": 0.816271186440678,
"grad_norm": 0.45703125,
"learning_rate": 1.8956043956043958e-06,
"loss": 0.1482,
"step": 301
},
{
"epoch": 0.8189830508474576,
"grad_norm": 0.4375,
"learning_rate": 1.8681318681318684e-06,
"loss": 0.2671,
"step": 302
},
{
"epoch": 0.8216949152542373,
"grad_norm": 0.421875,
"learning_rate": 1.840659340659341e-06,
"loss": 0.2112,
"step": 303
},
{
"epoch": 0.8244067796610169,
"grad_norm": 0.4140625,
"learning_rate": 1.8131868131868133e-06,
"loss": 0.1977,
"step": 304
},
{
"epoch": 0.8271186440677966,
"grad_norm": 0.3046875,
"learning_rate": 1.7857142857142859e-06,
"loss": 0.1583,
"step": 305
},
{
"epoch": 0.8298305084745763,
"grad_norm": 0.3125,
"learning_rate": 1.7582417582417585e-06,
"loss": 0.1156,
"step": 306
},
{
"epoch": 0.8325423728813559,
"grad_norm": 0.322265625,
"learning_rate": 1.7307692307692308e-06,
"loss": 0.1492,
"step": 307
},
{
"epoch": 0.8352542372881356,
"grad_norm": 0.4609375,
"learning_rate": 1.7032967032967034e-06,
"loss": 0.2713,
"step": 308
},
{
"epoch": 0.8379661016949153,
"grad_norm": 0.384765625,
"learning_rate": 1.6758241758241758e-06,
"loss": 0.1979,
"step": 309
},
{
"epoch": 0.8406779661016949,
"grad_norm": 0.30859375,
"learning_rate": 1.6483516483516484e-06,
"loss": 0.1339,
"step": 310
},
{
"epoch": 0.8406779661016949,
"eval_loss": 0.2008802443742752,
"eval_model_preparation_time": 0.0393,
"eval_runtime": 11.5181,
"eval_samples_per_second": 8.682,
"eval_steps_per_second": 4.341,
"step": 310
},
{
"epoch": 0.8433898305084746,
"grad_norm": 0.435546875,
"learning_rate": 1.6208791208791212e-06,
"loss": 0.1836,
"step": 311
},
{
"epoch": 0.8461016949152542,
"grad_norm": 0.326171875,
"learning_rate": 1.5934065934065933e-06,
"loss": 0.1229,
"step": 312
},
{
"epoch": 0.848813559322034,
"grad_norm": 0.41015625,
"learning_rate": 1.5659340659340661e-06,
"loss": 0.2302,
"step": 313
},
{
"epoch": 0.8515254237288136,
"grad_norm": 0.470703125,
"learning_rate": 1.5384615384615387e-06,
"loss": 0.283,
"step": 314
},
{
"epoch": 0.8542372881355932,
"grad_norm": 0.41796875,
"learning_rate": 1.510989010989011e-06,
"loss": 0.1891,
"step": 315
},
{
"epoch": 0.8569491525423729,
"grad_norm": 0.361328125,
"learning_rate": 1.4835164835164837e-06,
"loss": 0.2037,
"step": 316
},
{
"epoch": 0.8596610169491525,
"grad_norm": 0.3984375,
"learning_rate": 1.4560439560439563e-06,
"loss": 0.1894,
"step": 317
},
{
"epoch": 0.8623728813559322,
"grad_norm": 0.37890625,
"learning_rate": 1.4285714285714286e-06,
"loss": 0.188,
"step": 318
},
{
"epoch": 0.8650847457627119,
"grad_norm": 0.3984375,
"learning_rate": 1.4010989010989012e-06,
"loss": 0.1888,
"step": 319
},
{
"epoch": 0.8677966101694915,
"grad_norm": 0.3515625,
"learning_rate": 1.3736263736263738e-06,
"loss": 0.136,
"step": 320
},
{
"epoch": 0.8677966101694915,
"eval_loss": 0.20059189200401306,
"eval_model_preparation_time": 0.0393,
"eval_runtime": 11.5219,
"eval_samples_per_second": 8.679,
"eval_steps_per_second": 4.34,
"step": 320
},
{
"epoch": 0.8705084745762712,
"grad_norm": 0.44140625,
"learning_rate": 1.3461538461538462e-06,
"loss": 0.1919,
"step": 321
},
{
"epoch": 0.8732203389830508,
"grad_norm": 0.4140625,
"learning_rate": 1.3186813186813187e-06,
"loss": 0.1494,
"step": 322
},
{
"epoch": 0.8759322033898305,
"grad_norm": 0.40625,
"learning_rate": 1.2912087912087915e-06,
"loss": 0.1777,
"step": 323
},
{
"epoch": 0.8786440677966102,
"grad_norm": 0.29296875,
"learning_rate": 1.2637362637362637e-06,
"loss": 0.1137,
"step": 324
},
{
"epoch": 0.8813559322033898,
"grad_norm": 0.53125,
"learning_rate": 1.2362637362637365e-06,
"loss": 0.2659,
"step": 325
},
{
"epoch": 0.8840677966101695,
"grad_norm": 0.46875,
"learning_rate": 1.2087912087912089e-06,
"loss": 0.2715,
"step": 326
},
{
"epoch": 0.8867796610169492,
"grad_norm": 0.53515625,
"learning_rate": 1.1813186813186815e-06,
"loss": 0.3199,
"step": 327
},
{
"epoch": 0.8894915254237288,
"grad_norm": 0.328125,
"learning_rate": 1.153846153846154e-06,
"loss": 0.1237,
"step": 328
},
{
"epoch": 0.8922033898305085,
"grad_norm": 0.392578125,
"learning_rate": 1.1263736263736264e-06,
"loss": 0.1683,
"step": 329
},
{
"epoch": 0.8949152542372881,
"grad_norm": 0.328125,
"learning_rate": 1.098901098901099e-06,
"loss": 0.1182,
"step": 330
},
{
"epoch": 0.8949152542372881,
"eval_loss": 0.2006920427083969,
"eval_model_preparation_time": 0.0393,
"eval_runtime": 11.5312,
"eval_samples_per_second": 8.672,
"eval_steps_per_second": 4.336,
"step": 330
},
{
"epoch": 0.8976271186440677,
"grad_norm": 0.384765625,
"learning_rate": 1.0714285714285714e-06,
"loss": 0.2004,
"step": 331
},
{
"epoch": 0.9003389830508475,
"grad_norm": 0.4375,
"learning_rate": 1.0439560439560442e-06,
"loss": 0.1806,
"step": 332
},
{
"epoch": 0.9030508474576271,
"grad_norm": 0.33203125,
"learning_rate": 1.0164835164835165e-06,
"loss": 0.1207,
"step": 333
},
{
"epoch": 0.9057627118644068,
"grad_norm": 0.52734375,
"learning_rate": 9.890109890109891e-07,
"loss": 0.262,
"step": 334
},
{
"epoch": 0.9084745762711864,
"grad_norm": 0.427734375,
"learning_rate": 9.615384615384617e-07,
"loss": 0.1493,
"step": 335
},
{
"epoch": 0.9111864406779661,
"grad_norm": 0.46484375,
"learning_rate": 9.340659340659342e-07,
"loss": 0.2196,
"step": 336
},
{
"epoch": 0.9138983050847458,
"grad_norm": 0.43359375,
"learning_rate": 9.065934065934067e-07,
"loss": 0.2253,
"step": 337
},
{
"epoch": 0.9166101694915254,
"grad_norm": 0.55859375,
"learning_rate": 8.791208791208792e-07,
"loss": 0.3115,
"step": 338
},
{
"epoch": 0.9193220338983051,
"grad_norm": 0.400390625,
"learning_rate": 8.516483516483517e-07,
"loss": 0.1799,
"step": 339
},
{
"epoch": 0.9220338983050848,
"grad_norm": 0.443359375,
"learning_rate": 8.241758241758242e-07,
"loss": 0.2435,
"step": 340
},
{
"epoch": 0.9220338983050848,
"eval_loss": 0.20056262612342834,
"eval_model_preparation_time": 0.0393,
"eval_runtime": 11.5199,
"eval_samples_per_second": 8.681,
"eval_steps_per_second": 4.34,
"step": 340
},
{
"epoch": 0.9247457627118644,
"grad_norm": 0.3828125,
"learning_rate": 7.967032967032967e-07,
"loss": 0.1995,
"step": 341
},
{
"epoch": 0.9274576271186441,
"grad_norm": 0.4375,
"learning_rate": 7.692307692307694e-07,
"loss": 0.2467,
"step": 342
},
{
"epoch": 0.9301694915254237,
"grad_norm": 0.447265625,
"learning_rate": 7.417582417582418e-07,
"loss": 0.1974,
"step": 343
},
{
"epoch": 0.9328813559322033,
"grad_norm": 0.455078125,
"learning_rate": 7.142857142857143e-07,
"loss": 0.1845,
"step": 344
},
{
"epoch": 0.9355932203389831,
"grad_norm": 0.4921875,
"learning_rate": 6.868131868131869e-07,
"loss": 0.2456,
"step": 345
},
{
"epoch": 0.9383050847457627,
"grad_norm": 0.37890625,
"learning_rate": 6.593406593406594e-07,
"loss": 0.2504,
"step": 346
},
{
"epoch": 0.9410169491525424,
"grad_norm": 0.416015625,
"learning_rate": 6.318681318681319e-07,
"loss": 0.2282,
"step": 347
},
{
"epoch": 0.943728813559322,
"grad_norm": 0.474609375,
"learning_rate": 6.043956043956044e-07,
"loss": 0.2812,
"step": 348
},
{
"epoch": 0.9464406779661017,
"grad_norm": 0.29296875,
"learning_rate": 5.76923076923077e-07,
"loss": 0.111,
"step": 349
},
{
"epoch": 0.9491525423728814,
"grad_norm": 0.396484375,
"learning_rate": 5.494505494505495e-07,
"loss": 0.1889,
"step": 350
},
{
"epoch": 0.9491525423728814,
"eval_loss": 0.20049941539764404,
"eval_model_preparation_time": 0.0393,
"eval_runtime": 11.3342,
"eval_samples_per_second": 8.823,
"eval_steps_per_second": 4.411,
"step": 350
},
{
"epoch": 0.951864406779661,
"grad_norm": 0.47265625,
"learning_rate": 5.219780219780221e-07,
"loss": 0.2932,
"step": 351
},
{
"epoch": 0.9545762711864407,
"grad_norm": 0.291015625,
"learning_rate": 4.945054945054946e-07,
"loss": 0.139,
"step": 352
},
{
"epoch": 0.9572881355932203,
"grad_norm": 0.3828125,
"learning_rate": 4.670329670329671e-07,
"loss": 0.2242,
"step": 353
},
{
"epoch": 0.96,
"grad_norm": 0.267578125,
"learning_rate": 4.395604395604396e-07,
"loss": 0.1224,
"step": 354
},
{
"epoch": 0.9627118644067797,
"grad_norm": 0.39453125,
"learning_rate": 4.120879120879121e-07,
"loss": 0.2066,
"step": 355
},
{
"epoch": 0.9654237288135593,
"grad_norm": 0.302734375,
"learning_rate": 3.846153846153847e-07,
"loss": 0.1435,
"step": 356
},
{
"epoch": 0.9681355932203389,
"grad_norm": 0.431640625,
"learning_rate": 3.5714285714285716e-07,
"loss": 0.2338,
"step": 357
},
{
"epoch": 0.9708474576271187,
"grad_norm": 0.1826171875,
"learning_rate": 3.296703296703297e-07,
"loss": 0.0504,
"step": 358
},
{
"epoch": 0.9735593220338983,
"grad_norm": 0.419921875,
"learning_rate": 3.021978021978022e-07,
"loss": 0.2525,
"step": 359
},
{
"epoch": 0.976271186440678,
"grad_norm": 0.41796875,
"learning_rate": 2.7472527472527475e-07,
"loss": 0.2248,
"step": 360
},
{
"epoch": 0.976271186440678,
"eval_loss": 0.20055252313613892,
"eval_model_preparation_time": 0.0393,
"eval_runtime": 11.3303,
"eval_samples_per_second": 8.826,
"eval_steps_per_second": 4.413,
"step": 360
},
{
"epoch": 0.9789830508474576,
"grad_norm": 0.353515625,
"learning_rate": 2.472527472527473e-07,
"loss": 0.1681,
"step": 361
},
{
"epoch": 0.9816949152542372,
"grad_norm": 0.35546875,
"learning_rate": 2.197802197802198e-07,
"loss": 0.1444,
"step": 362
},
{
"epoch": 0.984406779661017,
"grad_norm": 0.392578125,
"learning_rate": 1.9230769230769234e-07,
"loss": 0.2296,
"step": 363
},
{
"epoch": 0.9871186440677966,
"grad_norm": 0.35546875,
"learning_rate": 1.6483516483516484e-07,
"loss": 0.151,
"step": 364
},
{
"epoch": 0.9898305084745763,
"grad_norm": 0.369140625,
"learning_rate": 1.3736263736263737e-07,
"loss": 0.1958,
"step": 365
},
{
"epoch": 0.9925423728813559,
"grad_norm": 0.4453125,
"learning_rate": 1.098901098901099e-07,
"loss": 0.2799,
"step": 366
},
{
"epoch": 0.9952542372881356,
"grad_norm": 0.45703125,
"learning_rate": 8.241758241758242e-08,
"loss": 0.216,
"step": 367
},
{
"epoch": 0.9979661016949153,
"grad_norm": 0.3125,
"learning_rate": 5.494505494505495e-08,
"loss": 0.146,
"step": 368
},
{
"epoch": 1.0,
"grad_norm": 0.4921875,
"learning_rate": 2.7472527472527476e-08,
"loss": 0.1912,
"step": 369
}
],
"logging_steps": 1,
"max_steps": 369,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 5000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.2727396811268506e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}