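# Experiment configuration for the Astrid-1B-1 fine-tuning run. The key
# layout matches an H2O LLM Studio cfg.yaml (an assumption based on the
# section and key names); the section comments below follow that reading.

# Backbone: load the pretrained weights in float16 and enable gradient
# checkpointing to trade extra compute for lower memory use.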
architecture:
    backbone_dtype: float16
    force_embedding_gradients: false
    gradient_checkpointing: true
    intermediate_dropout: 0.0
    pretrained: true
    pretrained_weights: ''
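
# Augmentation probabilities; all are 0.0, so no augmentation is applied.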
augmentation:
    random_parent_probability: 0.0
    skip_parent_probability: 0.0
    token_mask_probability: 0.0
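
# Dataset: prompts come from the 'instruction' column and targets from the
# 'output' column of an OpenAssistant (oasst) parquet file; 1% of it is
# split off automatically for validation. With personalize: true, the
# chatbot_name and chatbot_author values are substituted into the training
# data. Since both add_eos_token_* flags are true, each sample is assembled
# roughly as (assuming Pythia's <|endoftext|> EOS token):
#   <|prompt|>How are you?<|endoftext|><|answer|>I am doing great!<|endoftext|>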
dataset:
    add_eos_token_to_answer: true
    add_eos_token_to_prompt: true
    answer_column: output
    chatbot_author: PAIX.cloud
    chatbot_name: Astrid
    data_sample: 1.0
    data_sample_choice:
    - Train
    - Validation
    limit_chained_samples: false
    mask_prompt_labels: true
    parent_id_column: None
    personalize: true
    prompt_column:
    - instruction
    text_answer_separator: <|answer|>
    text_prompt_start: <|prompt|>
    train_dataframe: data/user/oasst/train_full.pq
    validation_dataframe: None
    validation_size: 0.01
    validation_strategy: automatic
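
# Runtime: a single GPU ('0'), mixed-precision training, 8 dataloader
# workers; seed -1 picks a random seed per run.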
environment:
    compile_model: false
    find_unused_parameters: false
    gpus:
    - '0'
    huggingface_branch: main
    mixed_precision: true
    number_of_workers: 8
    seed: -1
    trust_remote_code: true
    use_fsdp: false
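
# Base model: the 1B-parameter deduplicated Pythia checkpoint.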
experiment_name: Astrid-1B-1
llm_backbone: EleutherAI/pythia-1b-deduped
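
# Experiment tracking with Neptune; number_of_texts caps how many sample
# predictions are logged.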
logging:
    logger: Neptune
    neptune_project: llmstudio
    number_of_texts: 10
output_directory: output/user/Astrid-1B-1/
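
# Generation settings for validation inference. metric: GPT scores answers
# with an LLM judge (gpt-3.5-turbo-0301); batch_size_inference: 0 appears
# to fall back to the training batch size (an assumption about the default).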
prediction:
    batch_size_inference: 0
    do_sample: false
    max_length_inference: 256
    metric: GPT
    metric_gpt_model: gpt-3.5-turbo-0301
    min_length_inference: 2
    num_beams: 1
    num_history: 2
    repetition_penalty: 1.2
    stop_tokens: ''
    temperature: 0.3
    top_k: 0
    top_p: 1.0
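
# Task type: plain causal language modeling on prompt/answer pairs.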
problem_type: text_causal_language_modeling
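
# Tokenizer limits: at most 512 tokens per sample, with up to 256 each for
# the prompt and the answer.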
tokenizer:
    add_prefix_space: false
    add_prompt_answer_tokens: false
    max_length: 512
    max_length_answer: 256
    max_length_prompt: 256
    padding_quantile: 1.0
    use_fast: true
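
# Optimization: LoRA fine-tuning (r=4, alpha=16, dropout 0.05) with AdamW,
# a cosine LR schedule, peak LR 1e-4, batch size 10, 3 epochs. The empty
# lora_target_modules presumably falls back to default target layers for
# the backbone (an assumption). The kl_*, ppo_*, and reward_model keys
# configure RLHF and are inert here because use_rlhf is false.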
training:
    adaptive_kl_control: true
    advantages_gamma: 0.99
    advantages_lambda: 0.95
    batch_size: 10
    differential_learning_rate: 1.0e-05
    differential_learning_rate_layers: []
    drop_last_batch: true
    epochs: 3
    evaluate_before_training: false
    evaluation_epochs: 1.0
    grad_accumulation: 1
    gradient_clip: 0.0
    initial_kl_coefficient: 0.2
    kl_horizon: 10000
    kl_target: 6.0
    learning_rate: 0.0001
    lora: true
    lora_alpha: 16
    lora_dropout: 0.05
    lora_r: 4
    lora_target_modules: ''
    loss_function: TokenAveragedCrossEntropy
    offload_reward_model: false
    optimizer: AdamW
    ppo_batch_size: 1
    ppo_clip_policy: 0.2
    ppo_clip_value: 0.2
    ppo_epochs: 4
    ppo_generate_temperature: 1.0
    reward_model: OpenAssistant/reward-model-deberta-v3-large-v2
    save_best_checkpoint: false
    scaling_factor_value_loss: 0.1
    schedule: Cosine
    train_validation_data: false
    use_rlhf: false
    warmup_epochs: 0.0
    weight_decay: 0.0
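
# Minimal inference sketch for the exported model (hypothetical repo id;
# the prompt format mirrors the dataset section above):
#   from transformers import pipeline
#   pipe = pipeline("text-generation", model="<hf-user>/Astrid-1B-1",
#                   trust_remote_code=True)
#   out = pipe("<|prompt|>How are you?<|endoftext|><|answer|>",
#              max_new_tokens=256, repetition_penalty=1.2)
#   print(out[0]["generated_text"])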