llama2_7b_chat-SSFT-MEDQA-F…/finetune_config.json

{
"base_model": "kmseong/llama2_7b-chat-Safety-FT-lr5e-5",
"fine_tuning_type": "Full Parameter Fine-tuning",
"dataset": "medqa",
"num_train_samples": 10178,
"num_eval_samples": 0,
"batch_size": 4,
"grad_accum": 4,
"learning_rate": 3e-05,
"weight_decay": 0.01,
"warmup_ratio": 0.1,
"epochs": 3,
"max_length": 1024,
"max_grad_norm": 1.0,
"lr_scheduler_type": "cosine",
"optimizer": "AdamW (torch)",
"gradient_checkpointing": false,
"dtype": "bf16",
"trainer_type": "Trainer",
"safety_mix_ratio": 0.0,
"safety_data_path": null,
"medqa_train_path": "/home/yonsei_jong/Safety-WaRP-LLM/data/medqa_train_10178.jsonl",
"medqa_eval_path": null
}
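
For reference, these fields map naturally onto Hugging Face `transformers.TrainingArguments`. Below is a minimal sketch of that mapping, assuming the run used the stock `Trainer` as the `trainer_type` field suggests; the `output_dir` value and the exact field-to-argument correspondence are assumptions, since the actual training script is not included here. Note that the effective batch size is `batch_size` × `grad_accum` = 4 × 4 = 16 per device.

```python
# Sketch only: maps finetune_config.json onto TrainingArguments.
# The repository's real training script is not shown; this mapping is
# inferred from the key names. "AdamW (torch)" is read as `adamw_torch`.
import json

from transformers import TrainingArguments

with open("finetune_config.json") as f:
    cfg = json.load(f)

args = TrainingArguments(
    output_dir="out",                                # hypothetical; not in the config
    per_device_train_batch_size=cfg["batch_size"],   # 4
    gradient_accumulation_steps=cfg["grad_accum"],   # 4 -> effective batch size 16
    learning_rate=cfg["learning_rate"],              # 3e-5
    weight_decay=cfg["weight_decay"],                # 0.01
    warmup_ratio=cfg["warmup_ratio"],                # 0.1
    num_train_epochs=cfg["epochs"],                  # 3
    max_grad_norm=cfg["max_grad_norm"],              # 1.0
    lr_scheduler_type=cfg["lr_scheduler_type"],      # "cosine"
    optim="adamw_torch",                             # from "AdamW (torch)"
    gradient_checkpointing=cfg["gradient_checkpointing"],  # false
    bf16=(cfg["dtype"] == "bf16"),                   # mixed-precision dtype
)
```

With `safety_mix_ratio` set to 0.0 and `safety_data_path` null, this particular run fine-tunes on the 10,178 MedQA training samples alone, with no held-out evaluation set (`num_eval_samples` is 0 and `medqa_eval_path` is null).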