{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 15,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "completion_length": 275.15625,
      "epoch": 0.7272727272727273,
      "grad_norm": 51.914981842041016,
      "kl": 0.0,
      "learning_rate": 5e-07,
      "loss": 0.0,
      "reward": 2.868629149161279,
      "reward_std": 1.1547005318570882,
      "rewards/concensus_correctness_reward_func": 0.503312504850328,
      "rewards/consensus_reward_func": 0.75,
      "rewards/cumulative_reward_2": 0.0,
      "rewards/final_correctness_reward_func": 0.125,
      "rewards/question_recreation_reward_func": 0.7467853534035385,
      "rewards/soft_format_reward_func": 0.0,
      "rewards/strict_format_reward_func": 0.15625,
      "rewards/xmlcount_reward_func": 0.5872812583111227,
      "step": 2
    },
    {
      "completion_length": 144.78571428571428,
      "epoch": 1.3636363636363638,
      "grad_norm": 7581674.0,
      "kl": 135191.8410544875,
      "learning_rate": 4.752422169756047e-07,
      "loss": 118.2929,
      "reward": 5.744728139468601,
      "reward_std": 0.4292134895388569,
      "rewards/concensus_correctness_reward_func": 1.3088571386677879,
      "rewards/consensus_reward_func": 1.5714285714285714,
      "rewards/cumulative_reward_2": 0.0,
      "rewards/final_correctness_reward_func": 0.5,
      "rewards/question_recreation_reward_func": 0.9432996256010873,
      "rewards/soft_format_reward_func": 0.0,
      "rewards/strict_format_reward_func": 0.2857142857142857,
      "rewards/xmlcount_reward_func": 1.1354285734040397,
      "step": 4
    },
    {
      "completion_length": 197.89285714285714,
      "epoch": 2.0,
      "grad_norm": 2249.467041015625,
      "kl": 88.22475246286818,
      "learning_rate": 4.058724504646834e-07,
      "loss": 0.0772,
      "reward": 5.14643143756049,
      "reward_std": 0.416251039398568,
      "rewards/concensus_correctness_reward_func": 1.1512142920068331,
      "rewards/consensus_reward_func": 1.5,
      "rewards/cumulative_reward_2": 0.0,
      "rewards/final_correctness_reward_func": 0.14285714285714285,
      "rewards/question_recreation_reward_func": 0.9342885783740452,
      "rewards/soft_format_reward_func": 0.0,
      "rewards/strict_format_reward_func": 0.30357142857142855,
      "rewards/xmlcount_reward_func": 1.1145000117165702,
      "step": 6
    },
    {
      "completion_length": 165.0625,
      "epoch": 2.7272727272727275,
      "grad_norm": 325299.1875,
      "kl": 9993.062653008848,
      "learning_rate": 3.056302334890786e-07,
      "loss": 9.9931,
      "reward": 5.707393378019333,
      "reward_std": 0.45466129238775466,
      "rewards/concensus_correctness_reward_func": 1.3366875052452087,
      "rewards/consensus_reward_func": 1.625,
      "rewards/cumulative_reward_2": 0.0,
      "rewards/final_correctness_reward_func": 0.375,
      "rewards/question_recreation_reward_func": 0.9530495591461658,
      "rewards/soft_format_reward_func": 0.0,
      "rewards/strict_format_reward_func": 0.28125,
      "rewards/xmlcount_reward_func": 1.1364062651991844,
      "step": 8
    },
    {
      "completion_length": 154.57142857142858,
      "epoch": 3.3636363636363638,
      "grad_norm": 36.90648651123047,
      "kl": 1.0686370520187276,
      "learning_rate": 1.9436976651092142e-07,
      "loss": 0.0009,
      "reward": 4.954324381692069,
      "reward_std": 0.49093594934259144,
      "rewards/concensus_correctness_reward_func": 1.1099285749452454,
      "rewards/consensus_reward_func": 1.5,
      "rewards/cumulative_reward_2": 0.0,
      "rewards/final_correctness_reward_func": 0.0,
      "rewards/question_recreation_reward_func": 0.9175744141851153,
      "rewards/soft_format_reward_func": 0.0,
      "rewards/strict_format_reward_func": 0.30357142857142855,
      "rewards/xmlcount_reward_func": 1.1232500076293945,
      "step": 10
    },
    {
      "completion_length": 206.75,
      "epoch": 4.0,
      "grad_norm": 20.735090255737305,
      "kl": 25.556736263845647,
      "learning_rate": 9.412754953531663e-08,
      "loss": 0.0224,
      "reward": 5.51260210786547,
      "reward_std": 0.5624070341166642,
      "rewards/concensus_correctness_reward_func": 1.2301428520253725,
      "rewards/consensus_reward_func": 1.4285714285714286,
      "rewards/cumulative_reward_2": 0.0,
      "rewards/final_correctness_reward_func": 0.42857142857142855,
      "rewards/question_recreation_reward_func": 0.9858521180493491,
      "rewards/soft_format_reward_func": 0.0,
      "rewards/strict_format_reward_func": 0.2857142857142857,
      "rewards/xmlcount_reward_func": 1.1537500045129232,
      "step": 12
    },
    {
      "completion_length": 166.375,
      "epoch": 4.7272727272727275,
      "grad_norm": 78604.875,
      "kl": 1646.9266037647612,
      "learning_rate": 2.475778302439524e-08,
      "loss": 1.6469,
      "reward": 5.418441787362099,
      "reward_std": 0.7625497430562973,
      "rewards/concensus_correctness_reward_func": 1.3616875037550926,
      "rewards/consensus_reward_func": 1.5625,
      "rewards/cumulative_reward_2": 0.0,
      "rewards/final_correctness_reward_func": 0.125,
      "rewards/question_recreation_reward_func": 0.9354105480015278,
      "rewards/soft_format_reward_func": 0.0,
      "rewards/strict_format_reward_func": 0.296875,
      "rewards/xmlcount_reward_func": 1.1369687542319298,
      "step": 14
    },
    {
      "completion_length": 131.16666666666666,
      "epoch": 5.0,
      "kl": 54391691.78967813,
      "reward": 5.036158482233684,
      "reward_std": 0.8958913286526998,
      "rewards/concensus_correctness_reward_func": 0.6153333373367786,
      "rewards/consensus_reward_func": 1.3333333333333333,
      "rewards/cumulative_reward_2": 0.0,
      "rewards/final_correctness_reward_func": 0.6666666666666666,
      "rewards/question_recreation_reward_func": 0.9208252330621084,
      "rewards/soft_format_reward_func": 0.0,
      "rewards/strict_format_reward_func": 0.2916666666666667,
      "rewards/xmlcount_reward_func": 1.2083333333333333,
      "step": 15,
      "total_flos": 0.0,
      "train_loss": 2736.9224156717382,
      "train_runtime": 115.6483,
      "train_samples_per_second": 2.075,
      "train_steps_per_second": 0.13
    }
  ],
  "logging_steps": 2,
  "max_steps": 15,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 8,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}