初始化项目，由ModelHub XC社区提供模型

Model: uyenlk/RMU_forget10_5e-5_Llama-3.2-3B-Instruct_coef10_layer26
Source: Original Platform
This commit is contained in:
ModelHub XC
2026-04-16 17:33:21 +08:00
commit f3f9598eec
19 changed files with 41766 additions and 0 deletions

616
.hydra/config.yaml Normal file
View File

@@ -0,0 +1,616 @@
# --- Model -----------------------------------------------------------------
# Target of unlearning: the TOFU-finetuned Llama-3.2-3B-Instruct checkpoint,
# loaded in bfloat16 with FlashAttention-2.
# NOTE(review): indentation appears to have been stripped from this file by
# the diff/scrape tooling; the original Hydra nesting must be restored before
# this YAML can be loaded.
model:
model_args:
pretrained_model_name_or_path: open-unlearning/tofu_Llama-3.2-3B-Instruct_full
attn_implementation: flash_attention_2
torch_dtype: bfloat16
# Tokenizer is taken from the base instruct model, not the finetuned one.
tokenizer_args:
pretrained_model_name_or_path: meta-llama/Llama-3.2-3B-Instruct
# Chat-template fragments (Llama-3 special tokens) used to build prompts.
# The quoted scalars below intentionally span two lines (embedded newline).
template_args:
apply_chat_template: true
system_prompt: You are a helpful assistant.
system_prompt_with_special_tokens: '<|begin_of_text|><|start_header_id|>system<|end_header_id|>
You are a helpful assistant.<|eot_id|>'
user_start_tag: '<|start_header_id|>user<|end_header_id|>
'
user_end_tag: <|eot_id|>
asst_start_tag: '<|start_header_id|>assistant<|end_header_id|>
'
asst_end_tag: <|eot_id|>
# Plain string; not an ISO date, so YAML keeps it as a string.
date_string: 10 Apr 2025
# --- Trainer ---------------------------------------------------------------
# RMU unlearning trainer; `args` are HuggingFace TrainingArguments, while
# `method_args` are RMU-specific hyperparameters.
trainer:
handler: RMU
args:
per_device_train_batch_size: 1
per_device_eval_batch_size: 1
# Effective batch size = 1 x 32 accumulation steps (per device).
gradient_accumulation_steps: 32
learning_rate: 5.0e-05
bf16: true
bf16_full_eval: true
logging_steps: 5
output_dir: ${paths.output_dir}
logging_dir: ${trainer.args.output_dir}/logs
report_to: tensorboard
# FIX: was `None`, which YAML parses as the *string* "None" (YAML's null
# literal is `null`/`~`); TrainingArguments expects a real null here.
ddp_find_unused_parameters: null
gradient_checkpointing: true
optim: paged_adamw_32bit
# Quoted on purpose: a bare `no` is read as boolean false by YAML 1.1 loaders.
save_strategy: 'no'
save_only_model: true
weight_decay: 0.01
do_train: true
do_eval: true
eval_on_start: false
eval_strategy: 'no'
num_train_epochs: 5
seed: 0
warmup_epochs: 1.0
remove_unused_columns: false
# RMU hyperparameters: steer activations of decoder layer 26 with
# coefficient 10; retain loss compares embeddings (EMBED_DIFF).
method_args:
gamma: 1.0
alpha: 1
retain_loss_type: EMBED_DIFF
steering_coeff: 10
module_regex: model\.layers\.26
# All parameters are trainable (regex matches everything).
trainable_params_regex:
- .*
# --- Data ------------------------------------------------------------------
# Forget set: the TOFU split being unlearned (resolved from ${forget_split}).
data:
forget:
TOFU_QA_forget:
handler: QADataset
args:
hf_args:
name: ${forget_split}
split: train
path: locuslab/TOFU
question_key: question
answer_key: answer
max_length: 256
# Retain set: complementary split whose knowledge should be preserved.
retain:
TOFU_QA_retain:
handler: QADataset
args:
hf_args:
name: ${retain_split}
split: train
path: locuslab/TOFU
question_key: question
answer_key: answer
max_length: 256
# presumably iteration length is anchored to the forget set — TODO confirm
anchor: forget
collator:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
# --- Evaluation: forget-set metrics ----------------------------------------
eval:
tofu:
metrics:
# Forget quality: KS-test (handler: ks_test) between this model's forget
# truth-ratio distribution and the retain model's logged distribution.
forget_quality:
pre_compute:
forget_truth_ratio:
pre_compute:
# Probability assigned to paraphrased (correct) answers.
forget_Q_A_PARA_Prob:
datasets:
TOFU_QA_forget_para:
handler: QADataset
args:
hf_args:
name: ${eval.tofu.forget_split}_perturbed
split: train
path: locuslab/TOFU
question_key: ${eval.tofu.question_key}
answer_key: paraphrased_answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
handler: probability
batch_size: ${eval.tofu.batch_size}
access_key: correct
# Probability assigned to perturbed (wrong) answers.
forget_Q_A_PERT_Prob:
datasets:
TOFU_QA_forget_pert:
handler: QADataset
args:
hf_args:
name: ${eval.tofu.forget_split}_perturbed
split: train
path: locuslab/TOFU
question_key: ${eval.tofu.question_key}
answer_key: perturbed_answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
handler: probability
batch_size: ${eval.tofu.batch_size}
access_key: wrong
handler: truth_ratio
aggregator: closer_to_1_better
access_key: forget
# Reference distribution from a retain-only model's evaluation logs.
reference_logs:
retain_model_logs:
path: ${eval.tofu.retain_logs_path}
include:
forget_truth_ratio:
access_key: retain
handler: ks_test
# Raw answer probability on the forget set.
forget_Q_A_Prob:
datasets:
TOFU_QA_forget:
handler: QADataset
args:
hf_args:
name: ${eval.tofu.forget_split}_perturbed
split: train
path: locuslab/TOFU
question_key: ${eval.tofu.question_key}
answer_key: answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
handler: probability
batch_size: ${eval.tofu.batch_size}
# Generation-based ROUGE-L recall on the forget set (greedy decoding).
forget_Q_A_ROUGE:
datasets:
TOFU_QA_forget:
handler: QADataset
args:
hf_args:
name: ${eval.tofu.forget_split}_perturbed
split: train
path: locuslab/TOFU
question_key: ${eval.tofu.question_key}
answer_key: answer
max_length: 512
predict_with_generate: true
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: left
index: index
generation_args:
do_sample: false
top_p: null
temperature: null
max_new_tokens: 200
use_cache: true
handler: rouge
rouge_type: rougeL_recall
batch_size: ${eval.tofu.batch_size}
# Model utility: harmonic-mean aggregate (handler: hm_aggregate) over
# retain-set, real-authors (ra_*) and world-facts (wf_*) metrics.
model_utility:
pre_compute:
# Retain set: answer probability.
retain_Q_A_Prob:
datasets:
TOFU_QA_retain_eval:
handler: QADataset
args:
hf_args:
name: retain_perturbed
split: train
path: locuslab/TOFU
question_key: ${eval.tofu.question_key}
answer_key: answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
handler: probability
batch_size: ${eval.tofu.batch_size}
# Retain set: generation ROUGE-L recall.
retain_Q_A_ROUGE:
datasets:
TOFU_QA_retain_eval:
handler: QADataset
args:
hf_args:
name: retain_perturbed
split: train
path: locuslab/TOFU
question_key: ${eval.tofu.question_key}
answer_key: answer
max_length: 512
predict_with_generate: true
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: left
index: index
generation_args:
do_sample: false
top_p: null
temperature: null
max_new_tokens: 200
use_cache: true
handler: rouge
rouge_type: rougeL_recall
batch_size: ${eval.tofu.batch_size}
# Retain set: truth ratio (paraphrased vs perturbed answer probability).
retain_Truth_Ratio:
pre_compute:
retain_Q_A_PARA_Prob:
datasets:
TOFU_QA_retain_para:
handler: QADataset
args:
hf_args:
name: retain_perturbed
split: train
path: locuslab/TOFU
question_key: ${eval.tofu.question_key}
answer_key: paraphrased_answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
handler: probability
batch_size: ${eval.tofu.batch_size}
access_key: correct
retain_Q_A_PERT_Prob:
datasets:
TOFU_QA_retain_pert:
handler: QADataset
args:
hf_args:
name: retain_perturbed
split: train
path: locuslab/TOFU
question_key: ${eval.tofu.question_key}
answer_key: perturbed_answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
handler: probability
batch_size: ${eval.tofu.batch_size}
access_key: wrong
handler: truth_ratio
aggregator: true_better
# Real-authors: normalised probability of correct vs perturbed options.
ra_Q_A_Prob_normalised:
pre_compute:
ra_Q_A_Prob:
datasets:
TOFU_QA_ra:
handler: QADataset
args:
hf_args:
name: real_authors_perturbed
split: train
path: locuslab/TOFU
question_key: question
answer_key: answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
handler: probability
batch_size: ${eval.tofu.batch_size}
access_key: correct
ra_Q_A_PERT_Prob:
datasets:
TOFU_QA_ra_pert:
handler: QADataset
args:
hf_args:
name: real_authors_perturbed
split: train
path: locuslab/TOFU
question_key: question
answer_key: perturbed_answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
handler: probability
batch_size: ${eval.tofu.batch_size}
access_key: wrong
handler: probability_w_options
# Real-authors: generation ROUGE-L recall.
ra_Q_A_ROUGE:
datasets:
TOFU_QA_ra:
handler: QADataset
args:
hf_args:
name: real_authors_perturbed
split: train
path: locuslab/TOFU
question_key: question
answer_key: answer
max_length: 512
predict_with_generate: true
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: left
index: index
generation_args:
do_sample: false
top_p: null
temperature: null
max_new_tokens: 200
use_cache: true
handler: rouge
rouge_type: rougeL_recall
batch_size: ${eval.tofu.batch_size}
# Real-authors: truth ratio.
ra_Truth_Ratio:
pre_compute:
ra_Q_A_Prob:
datasets:
TOFU_QA_ra:
handler: QADataset
args:
hf_args:
name: real_authors_perturbed
split: train
path: locuslab/TOFU
question_key: question
answer_key: answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
handler: probability
batch_size: ${eval.tofu.batch_size}
access_key: correct
ra_Q_A_PERT_Prob:
datasets:
TOFU_QA_ra_pert:
handler: QADataset
args:
hf_args:
name: real_authors_perturbed
split: train
path: locuslab/TOFU
question_key: question
answer_key: perturbed_answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
handler: probability
batch_size: ${eval.tofu.batch_size}
access_key: wrong
handler: truth_ratio
aggregator: true_better
# World-facts: normalised probability of correct vs perturbed options.
wf_Q_A_Prob_normalised:
pre_compute:
wf_Q_A_Prob:
datasets:
TOFU_QA_wf:
handler: QADataset
args:
hf_args:
name: world_facts_perturbed
split: train
path: locuslab/TOFU
question_key: question
answer_key: answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
handler: probability
batch_size: ${eval.tofu.batch_size}
access_key: correct
wf_Q_A_PERT_Prob:
datasets:
TOFU_QA_wf_pert:
handler: QADataset
args:
hf_args:
name: world_facts_perturbed
split: train
path: locuslab/TOFU
question_key: question
answer_key: perturbed_answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
handler: probability
batch_size: ${eval.tofu.batch_size}
access_key: wrong
handler: probability_w_options
# World-facts: generation ROUGE-L recall.
wf_Q_A_ROUGE:
datasets:
TOFU_QA_wf:
handler: QADataset
args:
hf_args:
name: world_facts_perturbed
split: train
path: locuslab/TOFU
question_key: question
answer_key: answer
max_length: 512
predict_with_generate: true
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: left
index: index
generation_args:
do_sample: false
top_p: null
temperature: null
max_new_tokens: 200
use_cache: true
handler: rouge
rouge_type: rougeL_recall
batch_size: ${eval.tofu.batch_size}
# World-facts: truth ratio.
wf_Truth_Ratio:
pre_compute:
wf_Q_A_Prob:
datasets:
TOFU_QA_wf:
handler: QADataset
args:
hf_args:
name: world_facts_perturbed
split: train
path: locuslab/TOFU
question_key: question
answer_key: answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
handler: probability
batch_size: ${eval.tofu.batch_size}
access_key: correct
wf_Q_A_PERT_Prob:
datasets:
TOFU_QA_wf_pert:
handler: QADataset
args:
hf_args:
name: world_facts_perturbed
split: train
path: locuslab/TOFU
question_key: question
answer_key: perturbed_answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
handler: probability
batch_size: ${eval.tofu.batch_size}
access_key: wrong
handler: truth_ratio
aggregator: true_better
handler: hm_aggregate
# Privacy leakage: min-k membership-inference attack (k: 0.4) over forget
# vs holdout examples, normalised against retain-model logs
# (handler: privleak, ref_value: 0.5).
privleak:
pre_compute:
mia_min_k:
datasets:
TOFU_QA_forget:
access_key: forget
handler: QADataset
args:
hf_args:
name: ${eval.tofu.forget_split}_perturbed
split: train
path: locuslab/TOFU
question_key: ${eval.tofu.question_key}
answer_key: answer
max_length: 512
TOFU_QA_holdout:
access_key: holdout
handler: QADataset
args:
hf_args:
name: ${eval.tofu.holdout_split}
path: locuslab/TOFU
split: train
question_key: question
answer_key: answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
batch_size: ${eval.tofu.batch_size}
handler: mia_min_k
k: 0.4
access_key: forget
reference_logs:
retain_model_logs:
path: ${eval.tofu.retain_logs_path}
include:
mia_min_k:
access_key: retain
handler: privleak
ref_value: 0.5
# Extraction strength on the forget set.
extraction_strength:
datasets:
TOFU_QA_forget:
handler: QADataset
args:
hf_args:
name: ${eval.tofu.forget_split}_perturbed
split: train
path: locuslab/TOFU
question_key: ${eval.tofu.question_key}
answer_key: answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
handler: extraction_strength
batch_size: ${eval.tofu.batch_size}
# Evaluator-level settings shared by the metrics above (interpolated via
# ${eval.tofu.*}).
handler: TOFUEvaluator
output_dir: ${paths.output_dir}
overwrite: true
forget_split: ${forget_split}
holdout_split: ${holdout_split}
retain_logs_path: ${retain_logs_path}
question_key: ${question_key}
batch_size: 16
# --- Paths and run-level settings ------------------------------------------
paths:
root_dir: .
data_dir: ${paths.root_dir}/data/
datasets: ${paths.root_dir}/configs/data/datasets
# Outputs land under saves/<mode>/<task_name>.
output_dir: ${paths.root_dir}/saves/${mode}/${task_name}
work_dir: ${hydra:runtime.cwd}
# Run parameters consumed by the interpolations above: unlearn forget10,
# retain90 retained, holdout10 for MIA; no retain-model logs provided.
forget_split: forget10
retain_split: retain90
holdout_split: holdout10
retain_logs_path: null
question_key: question
task_name: RMU_forget10_5e-5_Llama-3.2-3B-Instruct_coef10_layer26
mode: unlearn