Files
Qwen3-4B-Base-ftjob-6fd14d9c448d/checkpoint-144/trainer_state.json
ModelHub XC 363fe595ec 初始化项目,由ModelHub XC社区提供模型
Model: longtermrisk/Qwen3-4B-Base-ftjob-6fd14d9c448d
Source: Original Platform
2026-04-25 16:24:05 +08:00

1061 lines
24 KiB
JSON

{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 144,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.020942408376963352,
"grad_norm": 0.8203125,
"learning_rate": 0.0,
"loss": 2.3469,
"step": 1
},
{
"epoch": 0.041884816753926704,
"grad_norm": 0.86328125,
"learning_rate": 4.000000000000001e-06,
"loss": 2.3465,
"step": 2
},
{
"epoch": 0.06282722513089005,
"grad_norm": 0.828125,
"learning_rate": 8.000000000000001e-06,
"loss": 2.3569,
"step": 3
},
{
"epoch": 0.08376963350785341,
"grad_norm": 0.89453125,
"learning_rate": 1.2e-05,
"loss": 2.3221,
"step": 4
},
{
"epoch": 0.10471204188481675,
"grad_norm": 0.765625,
"learning_rate": 1.6000000000000003e-05,
"loss": 2.3378,
"step": 5
},
{
"epoch": 0.1256544502617801,
"grad_norm": 0.8125,
"learning_rate": 2e-05,
"loss": 2.372,
"step": 6
},
{
"epoch": 0.14659685863874344,
"grad_norm": 0.7578125,
"learning_rate": 1.985611510791367e-05,
"loss": 2.3172,
"step": 7
},
{
"epoch": 0.16753926701570682,
"grad_norm": 0.703125,
"learning_rate": 1.971223021582734e-05,
"loss": 2.3166,
"step": 8
},
{
"epoch": 0.18848167539267016,
"grad_norm": 0.7578125,
"learning_rate": 1.956834532374101e-05,
"loss": 2.3642,
"step": 9
},
{
"epoch": 0.2094240837696335,
"grad_norm": 0.69921875,
"learning_rate": 1.9424460431654675e-05,
"loss": 2.4136,
"step": 10
},
{
"epoch": 0.23036649214659685,
"grad_norm": 0.66796875,
"learning_rate": 1.9280575539568347e-05,
"loss": 2.2907,
"step": 11
},
{
"epoch": 0.2513089005235602,
"grad_norm": 0.63671875,
"learning_rate": 1.9136690647482016e-05,
"loss": 2.1985,
"step": 12
},
{
"epoch": 0.27225130890052357,
"grad_norm": 0.61328125,
"learning_rate": 1.8992805755395684e-05,
"loss": 2.2085,
"step": 13
},
{
"epoch": 0.2931937172774869,
"grad_norm": 0.59765625,
"learning_rate": 1.8848920863309356e-05,
"loss": 2.3054,
"step": 14
},
{
"epoch": 0.31413612565445026,
"grad_norm": 0.56640625,
"learning_rate": 1.870503597122302e-05,
"loss": 2.1966,
"step": 15
},
{
"epoch": 0.33507853403141363,
"grad_norm": 0.58203125,
"learning_rate": 1.8561151079136693e-05,
"loss": 2.1649,
"step": 16
},
{
"epoch": 0.35602094240837695,
"grad_norm": 0.55859375,
"learning_rate": 1.841726618705036e-05,
"loss": 2.2583,
"step": 17
},
{
"epoch": 0.3769633507853403,
"grad_norm": 0.5859375,
"learning_rate": 1.827338129496403e-05,
"loss": 2.3205,
"step": 18
},
{
"epoch": 0.39790575916230364,
"grad_norm": 0.54296875,
"learning_rate": 1.8129496402877698e-05,
"loss": 2.1887,
"step": 19
},
{
"epoch": 0.418848167539267,
"grad_norm": 0.5703125,
"learning_rate": 1.7985611510791367e-05,
"loss": 2.2294,
"step": 20
},
{
"epoch": 0.4397905759162304,
"grad_norm": 0.578125,
"learning_rate": 1.784172661870504e-05,
"loss": 2.1661,
"step": 21
},
{
"epoch": 0.4607329842931937,
"grad_norm": 0.5546875,
"learning_rate": 1.7697841726618707e-05,
"loss": 2.0935,
"step": 22
},
{
"epoch": 0.4816753926701571,
"grad_norm": 0.52734375,
"learning_rate": 1.7553956834532375e-05,
"loss": 2.1805,
"step": 23
},
{
"epoch": 0.5026178010471204,
"grad_norm": 0.6796875,
"learning_rate": 1.7410071942446044e-05,
"loss": 2.2131,
"step": 24
},
{
"epoch": 0.5235602094240838,
"grad_norm": 0.53125,
"learning_rate": 1.7266187050359712e-05,
"loss": 2.175,
"step": 25
},
{
"epoch": 0.5445026178010471,
"grad_norm": 0.50390625,
"learning_rate": 1.7122302158273384e-05,
"loss": 2.1707,
"step": 26
},
{
"epoch": 0.5654450261780105,
"grad_norm": 0.51171875,
"learning_rate": 1.6978417266187053e-05,
"loss": 2.0651,
"step": 27
},
{
"epoch": 0.5863874345549738,
"grad_norm": 0.53515625,
"learning_rate": 1.683453237410072e-05,
"loss": 2.1611,
"step": 28
},
{
"epoch": 0.6073298429319371,
"grad_norm": 0.53125,
"learning_rate": 1.669064748201439e-05,
"loss": 2.1401,
"step": 29
},
{
"epoch": 0.6282722513089005,
"grad_norm": 0.5390625,
"learning_rate": 1.6546762589928058e-05,
"loss": 2.1536,
"step": 30
},
{
"epoch": 0.6492146596858639,
"grad_norm": 0.5234375,
"learning_rate": 1.640287769784173e-05,
"loss": 2.207,
"step": 31
},
{
"epoch": 0.6701570680628273,
"grad_norm": 0.5234375,
"learning_rate": 1.6258992805755398e-05,
"loss": 2.1138,
"step": 32
},
{
"epoch": 0.6910994764397905,
"grad_norm": 0.5078125,
"learning_rate": 1.6115107913669067e-05,
"loss": 2.1441,
"step": 33
},
{
"epoch": 0.7120418848167539,
"grad_norm": 0.5,
"learning_rate": 1.5971223021582735e-05,
"loss": 2.1523,
"step": 34
},
{
"epoch": 0.7329842931937173,
"grad_norm": 0.50390625,
"learning_rate": 1.5827338129496403e-05,
"loss": 2.1216,
"step": 35
},
{
"epoch": 0.7539267015706806,
"grad_norm": 0.486328125,
"learning_rate": 1.5683453237410072e-05,
"loss": 2.0619,
"step": 36
},
{
"epoch": 0.774869109947644,
"grad_norm": 0.498046875,
"learning_rate": 1.5539568345323744e-05,
"loss": 2.0156,
"step": 37
},
{
"epoch": 0.7958115183246073,
"grad_norm": 0.50390625,
"learning_rate": 1.5395683453237412e-05,
"loss": 2.1156,
"step": 38
},
{
"epoch": 0.8167539267015707,
"grad_norm": 0.5,
"learning_rate": 1.525179856115108e-05,
"loss": 2.0561,
"step": 39
},
{
"epoch": 0.837696335078534,
"grad_norm": 0.52734375,
"learning_rate": 1.5107913669064749e-05,
"loss": 2.1226,
"step": 40
},
{
"epoch": 0.8586387434554974,
"grad_norm": 0.51171875,
"learning_rate": 1.496402877697842e-05,
"loss": 2.1199,
"step": 41
},
{
"epoch": 0.8795811518324608,
"grad_norm": 0.52734375,
"learning_rate": 1.4820143884892086e-05,
"loss": 2.1843,
"step": 42
},
{
"epoch": 0.900523560209424,
"grad_norm": 0.515625,
"learning_rate": 1.4676258992805756e-05,
"loss": 2.103,
"step": 43
},
{
"epoch": 0.9214659685863874,
"grad_norm": 0.53515625,
"learning_rate": 1.4532374100719426e-05,
"loss": 2.1346,
"step": 44
},
{
"epoch": 0.9424083769633508,
"grad_norm": 0.5390625,
"learning_rate": 1.4388489208633095e-05,
"loss": 2.0813,
"step": 45
},
{
"epoch": 0.9633507853403142,
"grad_norm": 0.55859375,
"learning_rate": 1.4244604316546765e-05,
"loss": 2.1774,
"step": 46
},
{
"epoch": 0.9842931937172775,
"grad_norm": 0.51953125,
"learning_rate": 1.4100719424460432e-05,
"loss": 2.0659,
"step": 47
},
{
"epoch": 1.0,
"grad_norm": 0.59765625,
"learning_rate": 1.3956834532374102e-05,
"loss": 2.0783,
"step": 48
},
{
"epoch": 1.0,
"eval_loss": 2.0860884189605713,
"eval_model_preparation_time": 0.0149,
"eval_runtime": 8.1703,
"eval_samples_per_second": 10.404,
"eval_steps_per_second": 5.263,
"step": 48
},
{
"epoch": 1.0209424083769634,
"grad_norm": 0.490234375,
"learning_rate": 1.3812949640287772e-05,
"loss": 2.0715,
"step": 49
},
{
"epoch": 1.0418848167539267,
"grad_norm": 0.49609375,
"learning_rate": 1.3669064748201439e-05,
"loss": 2.0527,
"step": 50
},
{
"epoch": 1.0628272251308901,
"grad_norm": 0.51171875,
"learning_rate": 1.3525179856115109e-05,
"loss": 1.9961,
"step": 51
},
{
"epoch": 1.0837696335078535,
"grad_norm": 0.54296875,
"learning_rate": 1.3381294964028777e-05,
"loss": 2.1151,
"step": 52
},
{
"epoch": 1.1047120418848166,
"grad_norm": 0.51171875,
"learning_rate": 1.3237410071942447e-05,
"loss": 2.0206,
"step": 53
},
{
"epoch": 1.12565445026178,
"grad_norm": 0.515625,
"learning_rate": 1.3093525179856117e-05,
"loss": 2.1006,
"step": 54
},
{
"epoch": 1.1465968586387434,
"grad_norm": 0.50390625,
"learning_rate": 1.2949640287769784e-05,
"loss": 2.0258,
"step": 55
},
{
"epoch": 1.1675392670157068,
"grad_norm": 0.53125,
"learning_rate": 1.2805755395683454e-05,
"loss": 1.981,
"step": 56
},
{
"epoch": 1.1884816753926701,
"grad_norm": 0.54296875,
"learning_rate": 1.2661870503597123e-05,
"loss": 2.1331,
"step": 57
},
{
"epoch": 1.2094240837696335,
"grad_norm": 0.494140625,
"learning_rate": 1.2517985611510793e-05,
"loss": 2.1064,
"step": 58
},
{
"epoch": 1.2303664921465969,
"grad_norm": 0.54296875,
"learning_rate": 1.2374100719424463e-05,
"loss": 1.9746,
"step": 59
},
{
"epoch": 1.2513089005235603,
"grad_norm": 0.4921875,
"learning_rate": 1.223021582733813e-05,
"loss": 2.0598,
"step": 60
},
{
"epoch": 1.2722513089005236,
"grad_norm": 0.53125,
"learning_rate": 1.20863309352518e-05,
"loss": 2.0676,
"step": 61
},
{
"epoch": 1.2931937172774868,
"grad_norm": 0.5078125,
"learning_rate": 1.1942446043165468e-05,
"loss": 2.0734,
"step": 62
},
{
"epoch": 1.3141361256544504,
"grad_norm": 0.5234375,
"learning_rate": 1.1798561151079137e-05,
"loss": 1.968,
"step": 63
},
{
"epoch": 1.3350785340314135,
"grad_norm": 0.546875,
"learning_rate": 1.1654676258992807e-05,
"loss": 2.0268,
"step": 64
},
{
"epoch": 1.356020942408377,
"grad_norm": 0.5390625,
"learning_rate": 1.1510791366906475e-05,
"loss": 2.0467,
"step": 65
},
{
"epoch": 1.3769633507853403,
"grad_norm": 0.51171875,
"learning_rate": 1.1366906474820146e-05,
"loss": 2.0148,
"step": 66
},
{
"epoch": 1.3979057591623036,
"grad_norm": 0.54296875,
"learning_rate": 1.1223021582733812e-05,
"loss": 2.0084,
"step": 67
},
{
"epoch": 1.418848167539267,
"grad_norm": 0.51171875,
"learning_rate": 1.1079136690647482e-05,
"loss": 2.005,
"step": 68
},
{
"epoch": 1.4397905759162304,
"grad_norm": 0.490234375,
"learning_rate": 1.0935251798561153e-05,
"loss": 2.0613,
"step": 69
},
{
"epoch": 1.4607329842931938,
"grad_norm": 0.51171875,
"learning_rate": 1.0791366906474821e-05,
"loss": 2.0394,
"step": 70
},
{
"epoch": 1.4816753926701571,
"grad_norm": 0.50390625,
"learning_rate": 1.0647482014388491e-05,
"loss": 2.0328,
"step": 71
},
{
"epoch": 1.5026178010471205,
"grad_norm": 0.51953125,
"learning_rate": 1.0503597122302158e-05,
"loss": 2.0529,
"step": 72
},
{
"epoch": 1.5235602094240837,
"grad_norm": 0.51953125,
"learning_rate": 1.0359712230215828e-05,
"loss": 1.9776,
"step": 73
},
{
"epoch": 1.5445026178010473,
"grad_norm": 0.53515625,
"learning_rate": 1.0215827338129498e-05,
"loss": 2.0813,
"step": 74
},
{
"epoch": 1.5654450261780104,
"grad_norm": 0.51953125,
"learning_rate": 1.0071942446043167e-05,
"loss": 1.9543,
"step": 75
},
{
"epoch": 1.5863874345549738,
"grad_norm": 0.51171875,
"learning_rate": 9.928057553956835e-06,
"loss": 2.0868,
"step": 76
},
{
"epoch": 1.6073298429319371,
"grad_norm": 0.51171875,
"learning_rate": 9.784172661870505e-06,
"loss": 2.0053,
"step": 77
},
{
"epoch": 1.6282722513089005,
"grad_norm": 0.609375,
"learning_rate": 9.640287769784174e-06,
"loss": 2.074,
"step": 78
},
{
"epoch": 1.649214659685864,
"grad_norm": 0.52734375,
"learning_rate": 9.496402877697842e-06,
"loss": 1.9674,
"step": 79
},
{
"epoch": 1.6701570680628273,
"grad_norm": 0.5234375,
"learning_rate": 9.35251798561151e-06,
"loss": 2.0277,
"step": 80
},
{
"epoch": 1.6910994764397906,
"grad_norm": 0.50390625,
"learning_rate": 9.20863309352518e-06,
"loss": 1.9616,
"step": 81
},
{
"epoch": 1.7120418848167538,
"grad_norm": 0.5234375,
"learning_rate": 9.064748201438849e-06,
"loss": 2.0238,
"step": 82
},
{
"epoch": 1.7329842931937174,
"grad_norm": 0.5234375,
"learning_rate": 8.92086330935252e-06,
"loss": 1.9599,
"step": 83
},
{
"epoch": 1.7539267015706805,
"grad_norm": 0.54296875,
"learning_rate": 8.776978417266188e-06,
"loss": 2.033,
"step": 84
},
{
"epoch": 1.7748691099476441,
"grad_norm": 0.5078125,
"learning_rate": 8.633093525179856e-06,
"loss": 2.0615,
"step": 85
},
{
"epoch": 1.7958115183246073,
"grad_norm": 0.53125,
"learning_rate": 8.489208633093526e-06,
"loss": 1.9893,
"step": 86
},
{
"epoch": 1.8167539267015707,
"grad_norm": 0.5546875,
"learning_rate": 8.345323741007195e-06,
"loss": 2.0189,
"step": 87
},
{
"epoch": 1.837696335078534,
"grad_norm": 0.50390625,
"learning_rate": 8.201438848920865e-06,
"loss": 1.9337,
"step": 88
},
{
"epoch": 1.8586387434554974,
"grad_norm": 0.53515625,
"learning_rate": 8.057553956834533e-06,
"loss": 2.0259,
"step": 89
},
{
"epoch": 1.8795811518324608,
"grad_norm": 0.51171875,
"learning_rate": 7.913669064748202e-06,
"loss": 2.0442,
"step": 90
},
{
"epoch": 1.900523560209424,
"grad_norm": 0.52734375,
"learning_rate": 7.769784172661872e-06,
"loss": 2.0282,
"step": 91
},
{
"epoch": 1.9214659685863875,
"grad_norm": 0.5234375,
"learning_rate": 7.62589928057554e-06,
"loss": 2.0452,
"step": 92
},
{
"epoch": 1.9424083769633507,
"grad_norm": 0.5234375,
"learning_rate": 7.48201438848921e-06,
"loss": 2.0285,
"step": 93
},
{
"epoch": 1.9633507853403143,
"grad_norm": 0.5234375,
"learning_rate": 7.338129496402878e-06,
"loss": 1.9695,
"step": 94
},
{
"epoch": 1.9842931937172774,
"grad_norm": 0.5234375,
"learning_rate": 7.194244604316547e-06,
"loss": 2.0677,
"step": 95
},
{
"epoch": 2.0,
"grad_norm": 0.62890625,
"learning_rate": 7.050359712230216e-06,
"loss": 2.0551,
"step": 96
},
{
"epoch": 2.0,
"eval_loss": 2.038198471069336,
"eval_model_preparation_time": 0.0149,
"eval_runtime": 8.0804,
"eval_samples_per_second": 10.519,
"eval_steps_per_second": 5.322,
"step": 96
},
{
"epoch": 2.020942408376963,
"grad_norm": 0.54296875,
"learning_rate": 6.906474820143886e-06,
"loss": 2.0502,
"step": 97
},
{
"epoch": 2.0418848167539267,
"grad_norm": 0.5078125,
"learning_rate": 6.762589928057554e-06,
"loss": 2.0383,
"step": 98
},
{
"epoch": 2.06282722513089,
"grad_norm": 0.515625,
"learning_rate": 6.618705035971224e-06,
"loss": 2.04,
"step": 99
},
{
"epoch": 2.0837696335078535,
"grad_norm": 0.54296875,
"learning_rate": 6.474820143884892e-06,
"loss": 1.9828,
"step": 100
},
{
"epoch": 2.1047120418848166,
"grad_norm": 0.50390625,
"learning_rate": 6.330935251798561e-06,
"loss": 2.025,
"step": 101
},
{
"epoch": 2.1256544502617802,
"grad_norm": 0.5234375,
"learning_rate": 6.1870503597122315e-06,
"loss": 1.9143,
"step": 102
},
{
"epoch": 2.1465968586387434,
"grad_norm": 0.50390625,
"learning_rate": 6.0431654676259e-06,
"loss": 1.998,
"step": 103
},
{
"epoch": 2.167539267015707,
"grad_norm": 0.5234375,
"learning_rate": 5.899280575539568e-06,
"loss": 2.0212,
"step": 104
},
{
"epoch": 2.18848167539267,
"grad_norm": 0.53125,
"learning_rate": 5.755395683453238e-06,
"loss": 2.0234,
"step": 105
},
{
"epoch": 2.2094240837696333,
"grad_norm": 0.5234375,
"learning_rate": 5.611510791366906e-06,
"loss": 1.9175,
"step": 106
},
{
"epoch": 2.230366492146597,
"grad_norm": 0.5234375,
"learning_rate": 5.467625899280576e-06,
"loss": 2.0098,
"step": 107
},
{
"epoch": 2.25130890052356,
"grad_norm": 0.51953125,
"learning_rate": 5.3237410071942456e-06,
"loss": 1.9502,
"step": 108
},
{
"epoch": 2.2722513089005236,
"grad_norm": 0.51953125,
"learning_rate": 5.179856115107914e-06,
"loss": 1.9713,
"step": 109
},
{
"epoch": 2.2931937172774868,
"grad_norm": 0.546875,
"learning_rate": 5.035971223021583e-06,
"loss": 1.8965,
"step": 110
},
{
"epoch": 2.3141361256544504,
"grad_norm": 0.5625,
"learning_rate": 4.892086330935253e-06,
"loss": 2.0483,
"step": 111
},
{
"epoch": 2.3350785340314135,
"grad_norm": 0.5234375,
"learning_rate": 4.748201438848921e-06,
"loss": 1.9471,
"step": 112
},
{
"epoch": 2.356020942408377,
"grad_norm": 0.53125,
"learning_rate": 4.60431654676259e-06,
"loss": 1.9803,
"step": 113
},
{
"epoch": 2.3769633507853403,
"grad_norm": 0.53515625,
"learning_rate": 4.46043165467626e-06,
"loss": 2.0178,
"step": 114
},
{
"epoch": 2.3979057591623034,
"grad_norm": 0.5390625,
"learning_rate": 4.316546762589928e-06,
"loss": 2.0112,
"step": 115
},
{
"epoch": 2.418848167539267,
"grad_norm": 0.5703125,
"learning_rate": 4.172661870503597e-06,
"loss": 2.0413,
"step": 116
},
{
"epoch": 2.4397905759162306,
"grad_norm": 0.52734375,
"learning_rate": 4.028776978417267e-06,
"loss": 2.011,
"step": 117
},
{
"epoch": 2.4607329842931938,
"grad_norm": 0.51171875,
"learning_rate": 3.884892086330936e-06,
"loss": 2.0063,
"step": 118
},
{
"epoch": 2.481675392670157,
"grad_norm": 0.5234375,
"learning_rate": 3.741007194244605e-06,
"loss": 1.9494,
"step": 119
},
{
"epoch": 2.5026178010471205,
"grad_norm": 0.5859375,
"learning_rate": 3.5971223021582737e-06,
"loss": 2.1046,
"step": 120
},
{
"epoch": 2.5235602094240837,
"grad_norm": 0.50390625,
"learning_rate": 3.453237410071943e-06,
"loss": 1.9551,
"step": 121
},
{
"epoch": 2.5445026178010473,
"grad_norm": 0.53125,
"learning_rate": 3.309352517985612e-06,
"loss": 1.9959,
"step": 122
},
{
"epoch": 2.5654450261780104,
"grad_norm": 0.53125,
"learning_rate": 3.1654676258992807e-06,
"loss": 1.9538,
"step": 123
},
{
"epoch": 2.5863874345549736,
"grad_norm": 0.55078125,
"learning_rate": 3.02158273381295e-06,
"loss": 2.0185,
"step": 124
},
{
"epoch": 2.607329842931937,
"grad_norm": 0.5390625,
"learning_rate": 2.877697841726619e-06,
"loss": 2.0775,
"step": 125
},
{
"epoch": 2.6282722513089007,
"grad_norm": 0.54296875,
"learning_rate": 2.733812949640288e-06,
"loss": 1.9863,
"step": 126
},
{
"epoch": 2.649214659685864,
"grad_norm": 0.5390625,
"learning_rate": 2.589928057553957e-06,
"loss": 1.969,
"step": 127
},
{
"epoch": 2.670157068062827,
"grad_norm": 0.53515625,
"learning_rate": 2.4460431654676263e-06,
"loss": 2.0218,
"step": 128
},
{
"epoch": 2.6910994764397906,
"grad_norm": 0.55078125,
"learning_rate": 2.302158273381295e-06,
"loss": 1.9308,
"step": 129
},
{
"epoch": 2.712041884816754,
"grad_norm": 0.50390625,
"learning_rate": 2.158273381294964e-06,
"loss": 1.9536,
"step": 130
},
{
"epoch": 2.7329842931937174,
"grad_norm": 0.5390625,
"learning_rate": 2.0143884892086333e-06,
"loss": 1.9376,
"step": 131
},
{
"epoch": 2.7539267015706805,
"grad_norm": 0.71484375,
"learning_rate": 1.8705035971223024e-06,
"loss": 1.9802,
"step": 132
},
{
"epoch": 2.774869109947644,
"grad_norm": 0.51953125,
"learning_rate": 1.7266187050359715e-06,
"loss": 1.9955,
"step": 133
},
{
"epoch": 2.7958115183246073,
"grad_norm": 0.5234375,
"learning_rate": 1.5827338129496403e-06,
"loss": 1.9325,
"step": 134
},
{
"epoch": 2.816753926701571,
"grad_norm": 0.5234375,
"learning_rate": 1.4388489208633094e-06,
"loss": 1.9576,
"step": 135
},
{
"epoch": 2.837696335078534,
"grad_norm": 0.51171875,
"learning_rate": 1.2949640287769785e-06,
"loss": 1.9662,
"step": 136
},
{
"epoch": 2.858638743455497,
"grad_norm": 0.52734375,
"learning_rate": 1.1510791366906476e-06,
"loss": 2.0159,
"step": 137
},
{
"epoch": 2.8795811518324608,
"grad_norm": 0.5390625,
"learning_rate": 1.0071942446043167e-06,
"loss": 1.9664,
"step": 138
},
{
"epoch": 2.900523560209424,
"grad_norm": 0.53125,
"learning_rate": 8.633093525179857e-07,
"loss": 1.9848,
"step": 139
},
{
"epoch": 2.9214659685863875,
"grad_norm": 0.5,
"learning_rate": 7.194244604316547e-07,
"loss": 1.9385,
"step": 140
},
{
"epoch": 2.9424083769633507,
"grad_norm": 0.5625,
"learning_rate": 5.755395683453238e-07,
"loss": 2.0412,
"step": 141
},
{
"epoch": 2.9633507853403143,
"grad_norm": 0.53125,
"learning_rate": 4.3165467625899287e-07,
"loss": 2.0406,
"step": 142
},
{
"epoch": 2.9842931937172774,
"grad_norm": 0.5234375,
"learning_rate": 2.877697841726619e-07,
"loss": 2.005,
"step": 143
},
{
"epoch": 3.0,
"grad_norm": 0.6171875,
"learning_rate": 1.4388489208633095e-07,
"loss": 1.997,
"step": 144
}
],
"logging_steps": 1,
"max_steps": 144,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 5000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4.364548133830656e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}