seed: 42

### model
model_name_or_path: meta-llama/Llama-3.2-1B-Instruct
trust_remote_code: true
flash_attn: auto
use_cache: false

### method
stage: sft
do_train: true
finetuning_type: full

### dataset
dataset: cola
template: llama3
cutoff_len: 2048
overwrite_cache: true
preprocessing_num_workers: 4
dataloader_num_workers: 4
packing: false

### output
output_dir: saves_bts_preliminary/base/llama-3.2-1b-instruct/train_cola_42_1776331560
logging_steps: 5
save_steps: 0.05
overwrite_output_dir: true
save_only_model: false
plot_loss: true
include_num_input_tokens_seen: true
push_to_hub: true
push_to_hub_organization: rbelanec
load_best_model_at_end: true
save_total_limit: 1

### train
per_device_train_batch_size: 8
learning_rate: 5.0e-6
num_train_epochs: 5
weight_decay: 1.0e-5
lr_scheduler_type: cosine
bf16: true
ddp_timeout: 180000000
resume_from_checkpoint: null
warmup_ratio: 0.1
optim: adamw_torch
report_to:
- wandb
run_name: base_llama-3.2-1b-instruct_train_cola_42_1776331560

### eval
per_device_eval_batch_size: 8
eval_strategy: steps
eval_steps: 0.05
val_size: 0.1
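
### usage (note)
# The keys above appear to follow the LLaMA-Factory config schema (an assumption
# based on fields such as `stage`, `finetuning_type`, `template`, and `plot_loss`).
# Under that assumption, a run is typically launched by passing this file to the CLI:
#   llamafactory-cli train path/to/this_config.yaml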