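# Full-parameter SFT run config (LLaMA-Factory-style arguments).
# ShareGPT-style tag mapping: assistant turns carry the "gpt" role.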
assistant_tag: gpt
bf16: true
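# ShareGPT-style tag: dataset field that holds each message's text.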
content_tag: value
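# Maximum sequence length in tokens; longer examples are truncated.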
cutoff_len: 16384
dataloader_num_workers: 4
dataloader_persistent_workers: true
dataloader_pin_memory: true
dataset: mlfoundations-dev/e1_code_fasttext_r1_1k
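# ONLINE loads the dataset straight from the Hugging Face Hub rather than
# from a local dataset directory.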
dataset_dir: ONLINE
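# Very large DDP timeout (seconds), effectively disabling the distributed
# watchdog during long preprocessing or checkpointing steps.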
ddp_timeout: 180000000
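# DeepSpeed config; the filename suggests ZeRO stage-3 sharding.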
deepspeed: dcft/train/zero3.json
do_train: true
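# Liger fused Triton kernels for lower memory use and faster training.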
enable_liger_kernel: true
finetuning_type: full
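# 96 = per_device_train_batch_size (1) x gradient_accumulation_steps (24)
# x 4 devices (implied; 96 / 24 = 4).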
global_batch_size: 96
gradient_accumulation_steps: 24
hub_model_id: mlfoundations-dev/e1_code_fasttext_r1_1k
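# Peak LR 2e-5 with cosine decay and 10% linear warmup (warmup_ratio below).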
learning_rate: 2.0e-05
logging_steps: 1
lr_scheduler_type: cosine
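# ShareGPT-style column mapping: conversations are read from the
# "conversations" field of each example.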
messages: conversations
model_name_or_path: Qwen/Qwen2.5-7B-Instruct
num_train_epochs: 7.0
output_dir: /data/cat/ws/ryma833h-dcft/checkpoints/e1_code_fasttext_r1_1k
overwrite_cache: true
per_device_train_batch_size: 1
plot_loss: true
preprocessing_num_workers: 16
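# Non-standard key; presumably a fork-specific flag that records the run
# in an experiment-tracking database.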
push_to_db: true
push_to_hub: true
report_to: wandb
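# ShareGPT-style tag: dataset field that holds each message's speaker role.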
role_tag: from
save_strategy: epoch
stage: sft
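# Chat template matching the Qwen2.5 instruct models.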
template: qwen25
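# ShareGPT-style tag: user turns carry the "human" role.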
user_tag: human
warmup_ratio: 0.1
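
# Minimal launch sketch, assuming stock LLaMA-Factory and that this file is
# saved as e1_code_fasttext_r1_1k.yaml (the dcft repo may use its own wrapper):
#   llamafactory-cli train e1_code_fasttext_r1_1k.yaml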