{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 136,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03690036900369004,
      "grad_norm": 0.12055090069770813,
      "learning_rate": 5.7142857142857145e-06,
      "loss": 0.1315,
      "step": 5
    },
    {
      "epoch": 0.07380073800738007,
      "grad_norm": 0.043656233698129654,
      "learning_rate": 1.2857142857142859e-05,
      "loss": 0.1031,
      "step": 10
    },
    {
      "epoch": 0.11070110701107011,
      "grad_norm": 0.07571940124034882,
      "learning_rate": 2e-05,
      "loss": 0.0986,
      "step": 15
    },
    {
      "epoch": 0.14760147601476015,
      "grad_norm": 0.06417621672153473,
      "learning_rate": 1.9917226741361014e-05,
      "loss": 0.108,
      "step": 20
    },
    {
      "epoch": 0.18450184501845018,
      "grad_norm": 0.07998916506767273,
      "learning_rate": 1.9670277247913205e-05,
      "loss": 0.1,
      "step": 25
    },
    {
      "epoch": 0.22140221402214022,
      "grad_norm": 0.07391388714313507,
      "learning_rate": 1.9263239682514953e-05,
      "loss": 0.0982,
      "step": 30
    },
    {
      "epoch": 0.25830258302583026,
      "grad_norm": 0.11852724105119705,
      "learning_rate": 1.8702852410301556e-05,
      "loss": 0.096,
      "step": 35
    },
    {
      "epoch": 0.2952029520295203,
      "grad_norm": 0.056973058730363846,
      "learning_rate": 1.7998392447397197e-05,
      "loss": 0.0934,
      "step": 40
    },
    {
      "epoch": 0.33210332103321033,
      "grad_norm": 0.09912875294685364,
      "learning_rate": 1.7161521883143936e-05,
      "loss": 0.085,
      "step": 45
    },
    {
      "epoch": 0.36900369003690037,
      "grad_norm": 0.07187044620513916,
      "learning_rate": 1.6206094818274228e-05,
      "loss": 0.0839,
      "step": 50
    },
    {
      "epoch": 0.4059040590405904,
      "grad_norm": 0.12322689592838287,
      "learning_rate": 1.5147928015098309e-05,
      "loss": 0.0793,
      "step": 55
    },
    {
      "epoch": 0.44280442804428044,
      "grad_norm": 0.06790988892316818,
      "learning_rate": 1.4004539056512667e-05,
      "loss": 0.0789,
      "step": 60
    },
    {
      "epoch": 0.4797047970479705,
      "grad_norm": 0.10258402675390244,
      "learning_rate": 1.2794856348516095e-05,
      "loss": 0.0728,
      "step": 65
    },
    {
      "epoch": 0.5166051660516605,
      "grad_norm": 0.10938461869955063,
      "learning_rate": 1.153890576704062e-05,
      "loss": 0.07,
      "step": 70
    },
    {
      "epoch": 0.5535055350553506,
      "grad_norm": 0.0713815838098526,
      "learning_rate": 1.0257479136549889e-05,
      "loss": 0.0634,
      "step": 75
    },
    {
      "epoch": 0.5904059040590406,
      "grad_norm": 0.1400076448917389,
      "learning_rate": 8.971790028626395e-06,
      "loss": 0.063,
      "step": 80
    },
    {
      "epoch": 0.6273062730627307,
      "grad_norm": 0.08636516332626343,
      "learning_rate": 7.703122578682047e-06,
      "loss": 0.0592,
      "step": 85
    },
    {
      "epoch": 0.6642066420664207,
      "grad_norm": 0.12527909874916077,
      "learning_rate": 6.472479134509052e-06,
      "loss": 0.0599,
      "step": 90
    },
    {
      "epoch": 0.7011070110701108,
      "grad_norm": 0.11907720565795898,
      "learning_rate": 5.300232569726805e-06,
      "loss": 0.0549,
      "step": 95
    },
    {
      "epoch": 0.7380073800738007,
      "grad_norm": 0.129660964012146,
      "learning_rate": 4.205789017954364e-06,
      "loss": 0.0529,
      "step": 100
    },
    {
      "epoch": 0.7749077490774908,
      "grad_norm": 0.11697438359260559,
      "learning_rate": 3.207266611027069e-06,
      "loss": 0.0536,
      "step": 105
    },
    {
      "epoch": 0.8118081180811808,
      "grad_norm": 0.10215689241886139,
      "learning_rate": 2.3211955396340003e-06,
      "loss": 0.0468,
      "step": 110
    },
    {
      "epoch": 0.8487084870848709,
      "grad_norm": 0.11825434118509293,
      "learning_rate": 1.5622444017681438e-06,
      "loss": 0.0534,
      "step": 115
    },
    {
      "epoch": 0.8856088560885609,
      "grad_norm": 0.11656656116247177,
      "learning_rate": 9.42977369195286e-07,
      "loss": 0.0534,
      "step": 120
    },
    {
      "epoch": 0.922509225092251,
      "grad_norm": 0.10192801058292389,
      "learning_rate": 4.73646191966175e-07,
      "loss": 0.0479,
      "step": 125
    },
    {
      "epoch": 0.959409594095941,
      "grad_norm": 0.08949802815914154,
      "learning_rate": 1.6202048426483652e-07,
      "loss": 0.0491,
      "step": 130
    },
    {
      "epoch": 0.996309963099631,
      "grad_norm": 0.0758034735918045,
      "learning_rate": 1.325910115169471e-08,
      "loss": 0.0455,
      "step": 135
    },
    {
      "epoch": 1.0,
      "step": 136,
      "total_flos": 2.50613257277014e+17,
      "train_loss": 0.07390243510770447,
      "train_runtime": 1155.3856,
      "train_samples_per_second": 14.999,
      "train_steps_per_second": 0.118
    }
  ],
  "logging_steps": 5,
  "max_steps": 136,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.50613257277014e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}