{
"started_at": "2026-01-12T08:52:02+01:00",
"repos": {
"merged": "Mathieu-Thomas-JOSSET/joke-20260112-081758",
"gguf": "Mathieu-Thomas-JOSSET/joke-finetome-model-gguf-phi4-20260112-081758"
},
"model_name": "unsloth/Phi-4-unsloth-bnb-4bit",
"dataset": {
"name": "Mathieu-Thomas-JOSSET/michael_abab_conversations_infini_instruct.jsonl",
"split": "train"
},
"training": {
"max_steps": 2000,
"learning_rate": 9.95267419777795e-06,
"per_device_train_batch_size": 2,
"gradient_accumulation_steps": 4,
"max_seq_length": 2048,
"seed": 3407,
"optimizer": "adamw_8bit",
"lr_scheduler_type": "linear"
},
"auto_lr": {
"enabled": true,
"use_n": "train",
"n_ref": 1436,
"base": 1e-05,
"mult": 0.5,
"final": 5e-06
},
"metrics": {
"train_runtime": 2220.807,
"train_samples_per_second": 5.944,
"train_steps_per_second": 0.743,
"total_flos": 4.35104765343744e+16,
"train_loss": 1.654274252106746,
"epoch": 3.3342618384401113
},
"best": {
"checkpoint": "/content/outputs/continue_r1_from_350_20260112_073729/checkpoint-100",
"metric": 2.2380564212799072,
"metric_name": "eval_loss"
},
"plotly": {
"html": "reports/training_loss_step.html",
"png": null
},
"inference_sample": {
"source": "dataset",
"index": 232,
"messages": [
{
"role": "user",
"content": "Dwight: \"Oh, man.\"\nMichael: \"How did we do it?\"\nDwight: \"I dont … have no idea.\""
}
]
}
}