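# Training recipe (LLaMA-Factory-style keys, sorted alphabetically) for
# full-parameter SFT of Qwen/Qwen2.5-7B-Instruct on the dataset
# mlfoundations-dev/code_ablate_duplications_3. Paths under dcft/ presumably
# resolve inside the surrounding DCFT repo (an assumption; the config itself
# does not say).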
assistant_tag: gpt
bf16: true
content_tag: value
cutoff_len: 16384
dataset: mlfoundations-dev/code_ablate_duplications_3
dataset_dir: ONLINE
ddp_timeout: 180000000
deepspeed: dcft/train/zero3.json
do_train: true
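# Distributed setup: ZeRO-3 parameter sharding comes from the referenced
# DeepSpeed JSON, and ddp_timeout is set so high that the distributed timeout
# effectively never fires (a reading of intent, not stated in the config).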
eval_strategy: 'no'
finetuning_type: full
formatting: sharegpt
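# ShareGPT formatting: the column mapping is split across the alphabetically
# sorted *_tag keys in this file. The conversation list is read from the
# dataset column named by `messages` (conversations); each turn's role comes
# from the column named by `role_tag` (from), with user_tag `human` mapped to
# the user side and assistant_tag `gpt` to the assistant side; turn text is
# read from the column named by `content_tag` (value).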
global_batch_size: 96
gradient_accumulation_steps: 3
hub_model_id: mlfoundations-dev/qwen2-5_code_ablate_duplications_3
include_hp: dcft/train/hp_settings/reasoning_old.yaml
learning_rate: 1.0e-05
logging_steps: 1
lr_scheduler_type: cosine
messages: conversations
model_name_or_path: Qwen/Qwen2.5-7B-Instruct
num_train_epochs: 3.0
output_dir: /tmp/dcft_checkpoints/train/checkpoints/qwen2-5_code_ablate_duplications_3
overwrite_cache: true
per_device_train_batch_size: 1
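# Batch accounting (derived, assuming plain data parallelism under ZeRO-3):
# effective batch = per_device_train_batch_size (1)
#   * gradient_accumulation_steps (3) * data-parallel world size,
# so global_batch_size: 96 implies 96 / (1 * 3) = 32 devices. The world size
# is supplied by the launcher, not written in this file.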
plot_loss: true
preprocessing_num_workers: 16
push_to_db: true
push_to_hub: true
report_to: wandb
role_tag: from
run_name: qwen2-5_code_ablate_duplications_3
save_strategy: epoch
stage: sft
template: qwen25
user_tag: human
warmup_ratio: 0.1
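# Optimization and checkpointing: cosine LR decay from 1.0e-05 with warmup
# over the first 10% of steps (warmup_ratio: 0.1), for 3 epochs; a checkpoint
# is saved once per epoch and, with push_to_hub, uploaded to the Hub repo in
# hub_model_id. push_to_db is not a stock LLaMA-Factory option and is
# presumably a DCFT-specific flag (assumption).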