#!/bin/bash
#SBATCH --job-name=apigen-fine-tune
#SBATCH --partition=hopper-prod
#SBATCH --qos=normal
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
#SBATCH --gpus-per-node=8
#SBATCH --output=./logs/%x-%j.out
#SBATCH --err=./logs/%x-%j.err
#SBATCH --time=02-00:00:00

# Echo commands and exit on the first error so failures show up in the logs.
set -ex

# Load the CUDA toolkit available on the cluster.
module load cuda/12.1

# Activate the project's virtual environment.
source .venv/bin/activate

# Launch TRL's SFT example script through accelerate with the DeepSpeed ZeRO-3
# config, fine-tuning Llama-3.2-1B-Instruct on the APIGen function-calling dataset.
srun --nodes=1 --ntasks=1 --export=ALL,ACCELERATE_LOG_LEVEL=info accelerate launch --config_file examples/accelerate_configs/deepspeed_zero3.yaml examples/scripts/sft.py \
    --run_name=Llama-3.2-1B-Instruct-APIGen-FC-v0.1 \
    --model_name_or_path="meta-llama/Llama-3.2-1B-Instruct" \
    --dataset_name="plaguss/apigen-synth-trl" \
    --report_to="wandb" \
    --learning_rate=5.0e-06 \
    --lr_scheduler_type="cosine" \
    --per_device_train_batch_size=6 \
    --per_device_eval_batch_size=6 \
    --do_eval \
    --eval_strategy="steps" \
    --gradient_accumulation_steps=2 \
    --output_dir="data/Llama-3.2-1B-Instruct-APIGen-FC-v0.1" \
    --logging_steps=5 \
    --eval_steps=50 \
    --num_train_epochs=2 \
    --max_steps=-1 \
    --warmup_steps=50 \
    --max_seq_length=2048 \
    --push_to_hub \
    --gradient_checkpointing \
    --bf16
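Assuming the script is saved as apigen-fine-tune.slurm (an illustrative filename; any name works), it can be submitted and followed with the standard SLURM commands:

# Submit the job to the scheduler; sbatch prints the assigned job ID.
sbatch apigen-fine-tune.slurm

# Follow training output via the log pattern declared in the header
# (%x-%j expands to job name and job ID; <job_id> is a placeholder).
tail -f ./logs/apigen-fine-tune-<job_id>.out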