{
"base_model": "kmseong/llama2_7b-chat-Safety-FT-lr5e-5",
"fine_tuning_type": "Full Parameter Fine-tuning",
"dataset": "mmlu",
"mmlu_subject": "all",
"mmlu_split": "auxiliary_train",
"num_train_samples": 8250,
"num_eval_samples": 0,
"batch_size": 4,
"grad_accum": 4,
"learning_rate": 3e-05,
"weight_decay": 0.01,
"warmup_ratio": 0.1,
"epochs": 3,
"max_length": 1024,
"max_grad_norm": 1.0,
"lr_scheduler_type": "cosine",
"optimizer": "AdamW (torch)",
"gradient_checkpointing": false,
"dtype": "bf16",
"trainer_type": "Trainer",
"safety_mix_ratio": 0.1,
"safety_data_path": "/home/yonsei_jong/Safety-WaRP-LLM/data/circuit_breakers_train.json"
}