Model: cjiao/OpenThoughts3-greedy-groups-top-openthinker3-1.5B-checkpoint-375-length-filtered Source: Original Platform
734 lines · 16 KiB · JSON
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "grad_norm": 2.5414493083953857,
      "learning_rate": 0.00015996052482925854,
      "loss": 1.3158,
      "step": 1
    },
    {
      "epoch": 0.04,
      "grad_norm": 22.558874130249023,
      "learning_rate": 0.00015984213827426174,
      "loss": 3.9659,
      "step": 2
    },
    {
      "epoch": 0.06,
      "grad_norm": 13.792680740356445,
      "learning_rate": 0.0001596449571682464,
      "loss": 2.6879,
      "step": 3
    },
    {
      "epoch": 0.08,
      "grad_norm": 4.81754207611084,
      "learning_rate": 0.00015936917610515826,
      "loss": 2.0717,
      "step": 4
    },
    {
      "epoch": 0.1,
      "grad_norm": 6.582490921020508,
      "learning_rate": 0.00015901506724761103,
      "loss": 1.9221,
      "step": 5
    },
    {
      "epoch": 0.12,
      "grad_norm": 3.573866844177246,
      "learning_rate": 0.00015858298005829512,
      "loss": 1.7919,
      "step": 6
    },
    {
      "epoch": 0.14,
      "grad_norm": 2.014141082763672,
      "learning_rate": 0.0001580733409550998,
      "loss": 1.6514,
      "step": 7
    },
    {
      "epoch": 0.16,
      "grad_norm": 2.1408352851867676,
      "learning_rate": 0.0001574866528902905,
      "loss": 1.561,
      "step": 8
    },
    {
      "epoch": 0.18,
      "grad_norm": 1.8259636163711548,
      "learning_rate": 0.00015682349485415545,
      "loss": 1.4794,
      "step": 9
    },
    {
      "epoch": 0.2,
      "grad_norm": 1.1060450077056885,
      "learning_rate": 0.0001560845213036123,
      "loss": 1.4122,
      "step": 10
    },
    {
      "epoch": 0.22,
      "grad_norm": 1.6639591455459595,
      "learning_rate": 0.00015527046151633805,
      "loss": 1.4144,
      "step": 11
    },
    {
      "epoch": 0.24,
      "grad_norm": 1.007230281829834,
      "learning_rate": 0.00015438211887106013,
      "loss": 1.3658,
      "step": 12
    },
    {
      "epoch": 0.26,
      "grad_norm": 0.9042938947677612,
      "learning_rate": 0.0001534203700547185,
      "loss": 1.3555,
      "step": 13
    },
    {
      "epoch": 0.28,
      "grad_norm": 1.5797706842422485,
      "learning_rate": 0.00015238616419728157,
      "loss": 1.3031,
      "step": 14
    },
    {
      "epoch": 0.3,
      "grad_norm": 0.8265913128852844,
      "learning_rate": 0.00015128052193506944,
      "loss": 1.2992,
      "step": 15
    },
    {
      "epoch": 0.32,
      "grad_norm": 0.7161855697631836,
      "learning_rate": 0.0001501045344035091,
      "loss": 1.3053,
      "step": 16
    },
    {
      "epoch": 0.34,
      "grad_norm": 0.655879557132721,
      "learning_rate": 0.0001488593621603155,
      "loss": 1.2677,
      "step": 17
    },
    {
      "epoch": 0.36,
      "grad_norm": 0.7548253536224365,
      "learning_rate": 0.00014754623404016122,
      "loss": 1.243,
      "step": 18
    },
    {
      "epoch": 0.38,
      "grad_norm": 0.7004591822624207,
      "learning_rate": 0.00014616644594196495,
      "loss": 1.2565,
      "step": 19
    },
    {
      "epoch": 0.4,
      "grad_norm": 0.5514001250267029,
      "learning_rate": 0.00014472135954999581,
      "loss": 1.2198,
      "step": 20
    },
    {
      "epoch": 0.42,
      "grad_norm": 0.45575690269470215,
      "learning_rate": 0.00014321240099005524,
      "loss": 1.2149,
      "step": 21
    },
    {
      "epoch": 0.44,
      "grad_norm": 0.45689329504966736,
      "learning_rate": 0.00014164105942206316,
      "loss": 1.2114,
      "step": 22
    },
    {
      "epoch": 0.46,
      "grad_norm": 0.47528719902038574,
      "learning_rate": 0.00014000888557043678,
      "loss": 1.2158,
      "step": 23
    },
    {
      "epoch": 0.48,
      "grad_norm": 0.4457974135875702,
      "learning_rate": 0.00013831749019371293,
      "loss": 1.2238,
      "step": 24
    },
    {
      "epoch": 0.5,
      "grad_norm": 0.35548001527786255,
      "learning_rate": 0.00013656854249492382,
      "loss": 1.2145,
      "step": 25
    },
    {
      "epoch": 0.52,
      "grad_norm": 0.5427165627479553,
      "learning_rate": 0.00013476376847429511,
      "loss": 1.2081,
      "step": 26
    },
    {
      "epoch": 0.54,
      "grad_norm": 0.44462138414382935,
      "learning_rate": 0.00013290494922589216,
      "loss": 1.1975,
      "step": 27
    },
    {
      "epoch": 0.56,
      "grad_norm": 0.43967288732528687,
      "learning_rate": 0.0001309939191798952,
      "loss": 1.1867,
      "step": 28
    },
    {
      "epoch": 0.58,
      "grad_norm": 0.3888394236564636,
      "learning_rate": 0.00012903256429223813,
      "loss": 1.1566,
      "step": 29
    },
    {
      "epoch": 0.6,
      "grad_norm": 0.3624572455883026,
      "learning_rate": 0.00012702282018339786,
      "loss": 1.1663,
      "step": 30
    },
    {
      "epoch": 0.62,
      "grad_norm": 0.37822291254997253,
      "learning_rate": 0.00012496667022817044,
      "loss": 1.1654,
      "step": 31
    },
    {
      "epoch": 0.64,
      "grad_norm": 0.312124103307724,
      "learning_rate": 0.00012286614359831974,
      "loss": 1.1489,
      "step": 32
    },
    {
      "epoch": 0.66,
      "grad_norm": 0.3584006428718567,
      "learning_rate": 0.00012072331326002972,
      "loss": 1.18,
      "step": 33
    },
    {
      "epoch": 0.68,
      "grad_norm": 0.2872241735458374,
      "learning_rate": 0.00011854029392813723,
      "loss": 1.139,
      "step": 34
    },
    {
      "epoch": 0.7,
      "grad_norm": 0.2621050179004669,
      "learning_rate": 0.00011631923997916375,
      "loss": 1.1694,
      "step": 35
    },
    {
      "epoch": 0.72,
      "grad_norm": 0.2508472502231598,
      "learning_rate": 0.00011406234332520582,
      "loss": 1.1404,
      "step": 36
    },
    {
      "epoch": 0.74,
      "grad_norm": 0.2924502491950989,
      "learning_rate": 0.00011177183125078245,
      "loss": 1.1311,
      "step": 37
    },
    {
      "epoch": 0.76,
      "grad_norm": 0.3054457902908325,
      "learning_rate": 0.00010944996421477426,
      "loss": 1.1536,
      "step": 38
    },
    {
      "epoch": 0.78,
      "grad_norm": 0.255205363035202,
      "learning_rate": 0.00010709903361962333,
      "loss": 1.137,
      "step": 39
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.2247438281774521,
      "learning_rate": 0.0001047213595499958,
      "loss": 1.1247,
      "step": 40
    },
    {
      "epoch": 0.82,
      "grad_norm": 0.22704657912254333,
      "learning_rate": 0.00010231928848313836,
      "loss": 1.1189,
      "step": 41
    },
    {
      "epoch": 0.84,
      "grad_norm": 0.22271329164505005,
      "learning_rate": 9.989519097318841e-05,
      "loss": 1.1254,
      "step": 42
    },
    {
      "epoch": 0.86,
      "grad_norm": 0.21321555972099304,
      "learning_rate": 9.745145931172342e-05,
      "loss": 1.0805,
      "step": 43
    },
    {
      "epoch": 0.88,
      "grad_norm": 0.2108481377363205,
      "learning_rate": 9.4990505166858e-05,
      "loss": 1.1303,
      "step": 44
    },
    {
      "epoch": 0.9,
      "grad_norm": 0.2166047990322113,
      "learning_rate": 9.251475720321848e-05,
      "loss": 1.1149,
      "step": 45
    },
    {
      "epoch": 0.92,
      "grad_norm": 0.20189222693443298,
      "learning_rate": 9.002665868514435e-05,
      "loss": 1.1049,
      "step": 46
    },
    {
      "epoch": 0.94,
      "grad_norm": 0.23353922367095947,
      "learning_rate": 8.752866506548117e-05,
      "loss": 1.0833,
      "step": 47
    },
    {
      "epoch": 0.96,
      "grad_norm": 0.20002281665802002,
      "learning_rate": 8.502324156234508e-05,
      "loss": 1.0629,
      "step": 48
    },
    {
      "epoch": 0.98,
      "grad_norm": 0.22263740003108978,
      "learning_rate": 8.251286072625027e-05,
      "loss": 1.1009,
      "step": 49
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.2224554568529129,
      "learning_rate": 8e-05,
      "loss": 1.07,
      "step": 50
    },
    {
      "epoch": 1.02,
      "grad_norm": 0.25747236609458923,
      "learning_rate": 7.748713927374974e-05,
      "loss": 1.113,
      "step": 51
    },
    {
      "epoch": 1.04,
      "grad_norm": 0.3350413143634796,
      "learning_rate": 7.497675843765493e-05,
      "loss": 1.2082,
      "step": 52
    },
    {
      "epoch": 1.06,
      "grad_norm": 0.25098180770874023,
      "learning_rate": 7.247133493451886e-05,
      "loss": 1.1663,
      "step": 53
    },
    {
      "epoch": 1.08,
      "grad_norm": 0.2518307864665985,
      "learning_rate": 6.997334131485565e-05,
      "loss": 1.195,
      "step": 54
    },
    {
      "epoch": 1.1,
      "grad_norm": 0.27086207270622253,
      "learning_rate": 6.748524279678152e-05,
      "loss": 1.1827,
      "step": 55
    },
    {
      "epoch": 1.12,
      "grad_norm": 0.2570662498474121,
      "learning_rate": 6.500949483314202e-05,
      "loss": 1.2075,
      "step": 56
    },
    {
      "epoch": 1.1400000000000001,
      "grad_norm": 0.2899071276187897,
      "learning_rate": 6.254854068827662e-05,
      "loss": 1.1787,
      "step": 57
    },
    {
      "epoch": 1.16,
      "grad_norm": 0.2995426654815674,
      "learning_rate": 6.0104809026811634e-05,
      "loss": 1.1946,
      "step": 58
    },
    {
      "epoch": 1.18,
      "grad_norm": 0.2579825818538666,
      "learning_rate": 5.7680711516861674e-05,
      "loss": 1.1632,
      "step": 59
    },
    {
      "epoch": 1.2,
      "grad_norm": 0.2803911864757538,
      "learning_rate": 5.5278640450004216e-05,
      "loss": 1.15,
      "step": 60
    },
    {
      "epoch": 1.22,
      "grad_norm": 0.27153438329696655,
      "learning_rate": 5.2900966380376693e-05,
      "loss": 1.1817,
      "step": 61
    },
    {
      "epoch": 1.24,
      "grad_norm": 0.26352250576019287,
      "learning_rate": 5.055003578522577e-05,
      "loss": 1.1487,
      "step": 62
    },
    {
      "epoch": 1.26,
      "grad_norm": 0.30853864550590515,
      "learning_rate": 4.822816874921756e-05,
      "loss": 1.1649,
      "step": 63
    },
    {
      "epoch": 1.28,
      "grad_norm": 0.2495918869972229,
      "learning_rate": 4.593765667479419e-05,
      "loss": 1.1325,
      "step": 64
    },
    {
      "epoch": 1.3,
      "grad_norm": 0.2213287055492401,
      "learning_rate": 4.3680760020836266e-05,
      "loss": 1.1281,
      "step": 65
    },
    {
      "epoch": 1.32,
      "grad_norm": 0.18903322517871857,
      "learning_rate": 4.145970607186277e-05,
      "loss": 1.1476,
      "step": 66
    },
    {
      "epoch": 1.34,
      "grad_norm": 0.20846880972385406,
      "learning_rate": 3.92766867399703e-05,
      "loss": 1.1233,
      "step": 67
    },
    {
      "epoch": 1.3599999999999999,
      "grad_norm": 0.17987675964832306,
      "learning_rate": 3.7133856401680256e-05,
      "loss": 1.1051,
      "step": 68
    },
    {
      "epoch": 1.38,
      "grad_norm": 0.17960353195667267,
      "learning_rate": 3.5033329771829576e-05,
      "loss": 1.1266,
      "step": 69
    },
    {
      "epoch": 1.4,
      "grad_norm": 0.19571684300899506,
      "learning_rate": 3.297717981660216e-05,
      "loss": 1.0983,
      "step": 70
    },
    {
      "epoch": 1.42,
      "grad_norm": 0.17779013514518738,
      "learning_rate": 3.09674357077619e-05,
      "loss": 1.0976,
      "step": 71
    },
    {
      "epoch": 1.44,
      "grad_norm": 0.16692589223384857,
      "learning_rate": 2.9006080820104823e-05,
      "loss": 1.0988,
      "step": 72
    },
    {
      "epoch": 1.46,
      "grad_norm": 0.14753758907318115,
      "learning_rate": 2.7095050774107867e-05,
      "loss": 1.1055,
      "step": 73
    },
    {
      "epoch": 1.48,
      "grad_norm": 0.1639815717935562,
      "learning_rate": 2.5236231525704902e-05,
      "loss": 1.117,
      "step": 74
    },
    {
      "epoch": 1.5,
      "grad_norm": 0.16390062868595123,
      "learning_rate": 2.3431457505076205e-05,
      "loss": 1.1131,
      "step": 75
    },
    {
      "epoch": 1.52,
      "grad_norm": 0.15375538170337677,
      "learning_rate": 2.1682509806287094e-05,
      "loss": 1.1111,
      "step": 76
    },
    {
      "epoch": 1.54,
      "grad_norm": 0.13153688609600067,
      "learning_rate": 1.9991114429563236e-05,
      "loss": 1.1045,
      "step": 77
    },
    {
      "epoch": 1.56,
      "grad_norm": 0.14417824149131775,
      "learning_rate": 1.835894057793687e-05,
      "loss": 1.0936,
      "step": 78
    },
    {
      "epoch": 1.58,
      "grad_norm": 0.1515759974718094,
      "learning_rate": 1.678759900994477e-05,
      "loss": 1.0665,
      "step": 79
    },
    {
      "epoch": 1.6,
      "grad_norm": 0.15299999713897705,
      "learning_rate": 1.5278640450004213e-05,
      "loss": 1.0794,
      "step": 80
    },
    {
      "epoch": 1.62,
      "grad_norm": 0.1259460747241974,
      "learning_rate": 1.3833554058035045e-05,
      "loss": 1.084,
      "step": 81
    },
    {
      "epoch": 1.6400000000000001,
      "grad_norm": 0.13053837418556213,
      "learning_rate": 1.2453765959838813e-05,
      "loss": 1.0694,
      "step": 82
    },
    {
      "epoch": 1.6600000000000001,
      "grad_norm": 0.1260591298341751,
      "learning_rate": 1.1140637839684519e-05,
      "loss": 1.0983,
      "step": 83
    },
    {
      "epoch": 1.6800000000000002,
      "grad_norm": 0.11727996915578842,
      "learning_rate": 9.895465596490931e-06,
      "loss": 1.0624,
      "step": 84
    },
    {
      "epoch": 1.7,
      "grad_norm": 0.12494169920682907,
      "learning_rate": 8.719478064930578e-06,
      "loss": 1.0937,
      "step": 85
    },
    {
      "epoch": 1.72,
      "grad_norm": 0.13201913237571716,
      "learning_rate": 7.613835802718452e-06,
      "loss": 1.07,
      "step": 86
    },
    {
      "epoch": 1.74,
      "grad_norm": 0.12661895155906677,
      "learning_rate": 6.579629945281509e-06,
      "loss": 1.0622,
      "step": 87
    },
    {
      "epoch": 1.76,
      "grad_norm": 0.11581739038228989,
      "learning_rate": 5.6178811289398925e-06,
      "loss": 1.0805,
      "step": 88
    },
    {
      "epoch": 1.78,
      "grad_norm": 0.1270962953567505,
      "learning_rate": 4.729538483661964e-06,
      "loss": 1.0685,
      "step": 89
    },
    {
      "epoch": 1.8,
      "grad_norm": 0.1226486787199974,
      "learning_rate": 3.915478696387718e-06,
      "loss": 1.0612,
      "step": 90
    },
    {
      "epoch": 1.8199999999999998,
      "grad_norm": 0.12050648778676987,
      "learning_rate": 3.176505145844555e-06,
      "loss": 1.053,
      "step": 91
    },
    {
      "epoch": 1.8399999999999999,
      "grad_norm": 0.12416510283946991,
      "learning_rate": 2.513347109709514e-06,
      "loss": 1.0649,
      "step": 92
    },
    {
      "epoch": 1.8599999999999999,
      "grad_norm": 0.10619990527629852,
      "learning_rate": 1.9266590449002052e-06,
      "loss": 1.024,
      "step": 93
    },
    {
      "epoch": 1.88,
      "grad_norm": 0.10766904056072235,
      "learning_rate": 1.4170199417049114e-06,
      "loss": 1.0724,
      "step": 94
    },
    {
      "epoch": 1.9,
      "grad_norm": 0.10820022970438004,
      "learning_rate": 9.849327523889873e-07,
      "loss": 1.0594,
      "step": 95
    },
    {
      "epoch": 1.92,
      "grad_norm": 0.10673188418149948,
      "learning_rate": 6.308238948417788e-07,
      "loss": 1.0471,
      "step": 96
    },
    {
      "epoch": 1.94,
      "grad_norm": 0.10520946979522705,
      "learning_rate": 3.550428317536003e-07,
      "loss": 1.0315,
      "step": 97
    },
    {
      "epoch": 1.96,
      "grad_norm": 0.10756369680166245,
      "learning_rate": 1.578617257382753e-07,
      "loss": 1.0129,
      "step": 98
    },
    {
      "epoch": 1.98,
      "grad_norm": 0.11246038973331451,
      "learning_rate": 3.9475170741472005e-08,
      "loss": 1.0481,
      "step": 99
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.11008872836828232,
      "learning_rate": 0.0,
      "loss": 1.0204,
      "step": 100
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.671112626070618e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}
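For reference, this is the `trainer_state.json` layout written by the Hugging Face Transformers `Trainer`: `log_history` holds one record per logging step (here `logging_steps` is 1), each with the epoch fraction, gradient norm, current learning rate, and training loss. The logged learning rates are consistent with a warmup-free cosine decay to zero, lr(t) = 0.5 * peak * (1 + cos(pi * t / max_steps)), from an inferred peak of 1.6e-4 (the rate is exactly 8e-5 at the halfway step). Below is a minimal sketch for loading the file and sanity-checking that schedule; the filename and the peak value are assumptions inferred from the logged values, not fields stored in the file.

```python
import json
import math

# Hedged sketch: load a Trainer state file like the one above. The path
# "trainer_state.json" is an assumed filename; point it at your checkpoint.
with open("trainer_state.json") as f:
    state = json.load(f)

steps = [e["step"] for e in state["log_history"]]
losses = [e["loss"] for e in state["log_history"]]
lrs = [e["learning_rate"] for e in state["log_history"]]

# Check the inferred cosine schedule: lr(t) = 0.5 * peak * (1 + cos(pi * t / T)).
peak = 1.6e-4  # assumption inferred from the logged values, not stored in the file
for t, lr in zip(steps, lrs):
    expected = 0.5 * peak * (1 + math.cos(math.pi * t / state["max_steps"]))
    assert abs(lr - expected) < 1e-9, (t, lr, expected)

print(f"trained {state['global_step']} steps over {state['epoch']} epochs; "
      f"loss {losses[0]:.4f} -> {losses[-1]:.4f}")
```

Reading the log this way also surfaces the one anomaly in the run: the loss spike to 3.9659 at step 2 coincides with the gradient-norm spike to 22.56, after which both settle and the loss declines steadily from about 1.32 to 1.02.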