初始化项目,由ModelHub XC社区提供模型

Model: uyenlk/RMU_forget10_5e-5_Llama-3.2-3B-Instruct_coef10_layer26
Source: Original Platform
This commit is contained in:
ModelHub XC
2026-04-16 17:33:21 +08:00
commit f3f9598eec
19 changed files with 41766 additions and 0 deletions

616
.hydra/config.yaml Normal file
View File

@@ -0,0 +1,616 @@
model:
model_args:
pretrained_model_name_or_path: open-unlearning/tofu_Llama-3.2-3B-Instruct_full
attn_implementation: flash_attention_2
torch_dtype: bfloat16
tokenizer_args:
pretrained_model_name_or_path: meta-llama/Llama-3.2-3B-Instruct
template_args:
apply_chat_template: true
system_prompt: You are a helpful assistant.
system_prompt_with_special_tokens: '<|begin_of_text|><|start_header_id|>system<|end_header_id|>
You are a helpful assistant.<|eot_id|>'
user_start_tag: '<|start_header_id|>user<|end_header_id|>
'
user_end_tag: <|eot_id|>
asst_start_tag: '<|start_header_id|>assistant<|end_header_id|>
'
asst_end_tag: <|eot_id|>
date_string: 10 Apr 2025
trainer:
handler: RMU
args:
per_device_train_batch_size: 1
per_device_eval_batch_size: 1
gradient_accumulation_steps: 32
learning_rate: 5.0e-05
bf16: true
bf16_full_eval: true
logging_steps: 5
output_dir: ${paths.output_dir}
logging_dir: ${trainer.args.output_dir}/logs
report_to: tensorboard
ddp_find_unused_parameters: null
gradient_checkpointing: true
optim: paged_adamw_32bit
save_strategy: 'no'
save_only_model: true
weight_decay: 0.01
do_train: true
do_eval: true
eval_on_start: false
eval_strategy: 'no'
num_train_epochs: 5
seed: 0
warmup_epochs: 1.0
remove_unused_columns: false
method_args:
gamma: 1.0
alpha: 1
retain_loss_type: EMBED_DIFF
steering_coeff: 10
module_regex: model\.layers\.26
trainable_params_regex:
- .*
data:
forget:
TOFU_QA_forget:
handler: QADataset
args:
hf_args:
name: ${forget_split}
split: train
path: locuslab/TOFU
question_key: question
answer_key: answer
max_length: 256
retain:
TOFU_QA_retain:
handler: QADataset
args:
hf_args:
name: ${retain_split}
split: train
path: locuslab/TOFU
question_key: question
answer_key: answer
max_length: 256
anchor: forget
collator:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
eval:
tofu:
metrics:
forget_quality:
pre_compute:
forget_truth_ratio:
pre_compute:
forget_Q_A_PARA_Prob:
datasets:
TOFU_QA_forget_para:
handler: QADataset
args:
hf_args:
name: ${eval.tofu.forget_split}_perturbed
split: train
path: locuslab/TOFU
question_key: ${eval.tofu.question_key}
answer_key: paraphrased_answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
handler: probability
batch_size: ${eval.tofu.batch_size}
access_key: correct
forget_Q_A_PERT_Prob:
datasets:
TOFU_QA_forget_pert:
handler: QADataset
args:
hf_args:
name: ${eval.tofu.forget_split}_perturbed
split: train
path: locuslab/TOFU
question_key: ${eval.tofu.question_key}
answer_key: perturbed_answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
handler: probability
batch_size: ${eval.tofu.batch_size}
access_key: wrong
handler: truth_ratio
aggregator: closer_to_1_better
access_key: forget
reference_logs:
retain_model_logs:
path: ${eval.tofu.retain_logs_path}
include:
forget_truth_ratio:
access_key: retain
handler: ks_test
forget_Q_A_Prob:
datasets:
TOFU_QA_forget:
handler: QADataset
args:
hf_args:
name: ${eval.tofu.forget_split}_perturbed
split: train
path: locuslab/TOFU
question_key: ${eval.tofu.question_key}
answer_key: answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
handler: probability
batch_size: ${eval.tofu.batch_size}
forget_Q_A_ROUGE:
datasets:
TOFU_QA_forget:
handler: QADataset
args:
hf_args:
name: ${eval.tofu.forget_split}_perturbed
split: train
path: locuslab/TOFU
question_key: ${eval.tofu.question_key}
answer_key: answer
max_length: 512
predict_with_generate: true
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: left
index: index
generation_args:
do_sample: false
top_p: null
temperature: null
max_new_tokens: 200
use_cache: true
handler: rouge
rouge_type: rougeL_recall
batch_size: ${eval.tofu.batch_size}
model_utility:
pre_compute:
retain_Q_A_Prob:
datasets:
TOFU_QA_retain_eval:
handler: QADataset
args:
hf_args:
name: retain_perturbed
split: train
path: locuslab/TOFU
question_key: ${eval.tofu.question_key}
answer_key: answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
handler: probability
batch_size: ${eval.tofu.batch_size}
retain_Q_A_ROUGE:
datasets:
TOFU_QA_retain_eval:
handler: QADataset
args:
hf_args:
name: retain_perturbed
split: train
path: locuslab/TOFU
question_key: ${eval.tofu.question_key}
answer_key: answer
max_length: 512
predict_with_generate: true
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: left
index: index
generation_args:
do_sample: false
top_p: null
temperature: null
max_new_tokens: 200
use_cache: true
handler: rouge
rouge_type: rougeL_recall
batch_size: ${eval.tofu.batch_size}
retain_Truth_Ratio:
pre_compute:
retain_Q_A_PARA_Prob:
datasets:
TOFU_QA_retain_para:
handler: QADataset
args:
hf_args:
name: retain_perturbed
split: train
path: locuslab/TOFU
question_key: ${eval.tofu.question_key}
answer_key: paraphrased_answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
handler: probability
batch_size: ${eval.tofu.batch_size}
access_key: correct
retain_Q_A_PERT_Prob:
datasets:
TOFU_QA_retain_pert:
handler: QADataset
args:
hf_args:
name: retain_perturbed
split: train
path: locuslab/TOFU
question_key: ${eval.tofu.question_key}
answer_key: perturbed_answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
handler: probability
batch_size: ${eval.tofu.batch_size}
access_key: wrong
handler: truth_ratio
aggregator: true_better
ra_Q_A_Prob_normalised:
pre_compute:
ra_Q_A_Prob:
datasets:
TOFU_QA_ra:
handler: QADataset
args:
hf_args:
name: real_authors_perturbed
split: train
path: locuslab/TOFU
question_key: question
answer_key: answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
handler: probability
batch_size: ${eval.tofu.batch_size}
access_key: correct
ra_Q_A_PERT_Prob:
datasets:
TOFU_QA_ra_pert:
handler: QADataset
args:
hf_args:
name: real_authors_perturbed
split: train
path: locuslab/TOFU
question_key: question
answer_key: perturbed_answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
handler: probability
batch_size: ${eval.tofu.batch_size}
access_key: wrong
handler: probability_w_options
ra_Q_A_ROUGE:
datasets:
TOFU_QA_ra:
handler: QADataset
args:
hf_args:
name: real_authors_perturbed
split: train
path: locuslab/TOFU
question_key: question
answer_key: answer
max_length: 512
predict_with_generate: true
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: left
index: index
generation_args:
do_sample: false
top_p: null
temperature: null
max_new_tokens: 200
use_cache: true
handler: rouge
rouge_type: rougeL_recall
batch_size: ${eval.tofu.batch_size}
ra_Truth_Ratio:
pre_compute:
ra_Q_A_Prob:
datasets:
TOFU_QA_ra:
handler: QADataset
args:
hf_args:
name: real_authors_perturbed
split: train
path: locuslab/TOFU
question_key: question
answer_key: answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
handler: probability
batch_size: ${eval.tofu.batch_size}
access_key: correct
ra_Q_A_PERT_Prob:
datasets:
TOFU_QA_ra_pert:
handler: QADataset
args:
hf_args:
name: real_authors_perturbed
split: train
path: locuslab/TOFU
question_key: question
answer_key: perturbed_answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
handler: probability
batch_size: ${eval.tofu.batch_size}
access_key: wrong
handler: truth_ratio
aggregator: true_better
wf_Q_A_Prob_normalised:
pre_compute:
wf_Q_A_Prob:
datasets:
TOFU_QA_wf:
handler: QADataset
args:
hf_args:
name: world_facts_perturbed
split: train
path: locuslab/TOFU
question_key: question
answer_key: answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
handler: probability
batch_size: ${eval.tofu.batch_size}
access_key: correct
wf_Q_A_PERT_Prob:
datasets:
TOFU_QA_wf_pert:
handler: QADataset
args:
hf_args:
name: world_facts_perturbed
split: train
path: locuslab/TOFU
question_key: question
answer_key: perturbed_answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
handler: probability
batch_size: ${eval.tofu.batch_size}
access_key: wrong
handler: probability_w_options
wf_Q_A_ROUGE:
datasets:
TOFU_QA_wf:
handler: QADataset
args:
hf_args:
name: world_facts_perturbed
split: train
path: locuslab/TOFU
question_key: question
answer_key: answer
max_length: 512
predict_with_generate: true
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: left
index: index
generation_args:
do_sample: false
top_p: null
temperature: null
max_new_tokens: 200
use_cache: true
handler: rouge
rouge_type: rougeL_recall
batch_size: ${eval.tofu.batch_size}
wf_Truth_Ratio:
pre_compute:
wf_Q_A_Prob:
datasets:
TOFU_QA_wf:
handler: QADataset
args:
hf_args:
name: world_facts_perturbed
split: train
path: locuslab/TOFU
question_key: question
answer_key: answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
handler: probability
batch_size: ${eval.tofu.batch_size}
access_key: correct
wf_Q_A_PERT_Prob:
datasets:
TOFU_QA_wf_pert:
handler: QADataset
args:
hf_args:
name: world_facts_perturbed
split: train
path: locuslab/TOFU
question_key: question
answer_key: perturbed_answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
handler: probability
batch_size: ${eval.tofu.batch_size}
access_key: wrong
handler: truth_ratio
aggregator: true_better
handler: hm_aggregate
privleak:
pre_compute:
mia_min_k:
datasets:
TOFU_QA_forget:
access_key: forget
handler: QADataset
args:
hf_args:
name: ${eval.tofu.forget_split}_perturbed
split: train
path: locuslab/TOFU
question_key: ${eval.tofu.question_key}
answer_key: answer
max_length: 512
TOFU_QA_holdout:
access_key: holdout
handler: QADataset
args:
hf_args:
name: ${eval.tofu.holdout_split}
path: locuslab/TOFU
split: train
question_key: question
answer_key: answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
batch_size: ${eval.tofu.batch_size}
handler: mia_min_k
k: 0.4
access_key: forget
reference_logs:
retain_model_logs:
path: ${eval.tofu.retain_logs_path}
include:
mia_min_k:
access_key: retain
handler: privleak
ref_value: 0.5
extraction_strength:
datasets:
TOFU_QA_forget:
handler: QADataset
args:
hf_args:
name: ${eval.tofu.forget_split}_perturbed
split: train
path: locuslab/TOFU
question_key: ${eval.tofu.question_key}
answer_key: answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
handler: extraction_strength
batch_size: ${eval.tofu.batch_size}
handler: TOFUEvaluator
output_dir: ${paths.output_dir}
overwrite: true
forget_split: ${forget_split}
holdout_split: ${holdout_split}
retain_logs_path: ${retain_logs_path}
question_key: ${question_key}
batch_size: 16
paths:
root_dir: .
data_dir: ${paths.root_dir}/data/
datasets: ${paths.root_dir}/configs/data/datasets
output_dir: ${paths.root_dir}/saves/${mode}/${task_name}
work_dir: ${hydra:runtime.cwd}
forget_split: forget10
retain_split: retain90
holdout_split: holdout10
retain_logs_path: null
question_key: question
task_name: RMU_forget10_5e-5_Llama-3.2-3B-Instruct_coef10_layer26
mode: unlearn

277
.hydra/hydra.yaml Normal file
View File

@@ -0,0 +1,277 @@
hydra:
run:
dir: ${paths.output_dir}
sweep:
dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
subdir: ${hydra.job.num}
launcher:
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
sweeper:
_target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
max_batch_size: null
params: null
help:
app_name: ${hydra.job.name}
header: '${hydra.help.app_name} is powered by Hydra.
'
footer: 'Powered by Hydra (https://hydra.cc)
Use --hydra-help to view Hydra specific help
'
template: '${hydra.help.header}
== Configuration groups ==
Compose your configuration from those groups (group=option)
$APP_CONFIG_GROUPS
== Config ==
Override anything in the config (foo.bar=value)
$CONFIG
${hydra.help.footer}
'
hydra_help:
template: 'Hydra (${hydra.runtime.version})
See https://hydra.cc for more info.
== Flags ==
$FLAGS_HELP
== Configuration groups ==
Compose your configuration from those groups (For example, append hydra/job_logging=disabled
to command line)
$HYDRA_CONFIG_GROUPS
Use ''--cfg hydra'' to Show the Hydra config.
'
hydra_help: ???
hydra_logging:
version: 1
formatters:
colorlog:
(): colorlog.ColoredFormatter
format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
handlers:
console:
class: logging.StreamHandler
formatter: colorlog
stream: ext://sys.stdout
root:
level: INFO
handlers:
- console
disable_existing_loggers: false
job_logging:
version: 1
formatters:
simple:
format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
colorlog:
(): colorlog.ColoredFormatter
format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
- %(message)s'
log_colors:
DEBUG: purple
INFO: green
WARNING: yellow
ERROR: red
CRITICAL: red
handlers:
console:
class: logging.StreamHandler
formatter: colorlog
stream: ext://sys.stdout
file:
class: logging.FileHandler
formatter: simple
filename: ${hydra.runtime.output_dir}/${trainer.handler}.log
root:
level: INFO
handlers:
- console
- file
disable_existing_loggers: false
env: {}
mode: RUN
searchpath: []
callbacks: {}
output_subdir: .hydra
overrides:
hydra:
- hydra.mode=RUN
task:
- experiment=unlearn/tofu/default
- model=Llama-3.2-3B-Instruct
- model.model_args.pretrained_model_name_or_path=open-unlearning/tofu_Llama-3.2-3B-Instruct_full
- forget_split=forget10
- retain_split=retain90
- trainer=RMU
- trainer.method_args.steering_coeff=10
- trainer.method_args.module_regex=model\.layers\.26
- trainer.args.learning_rate=5e-5
- task_name=RMU_forget10_5e-5_Llama-3.2-3B-Instruct_coef10_layer26
job:
name: train
chdir: null
override_dirname: experiment=unlearn/tofu/default,forget_split=forget10,model.model_args.pretrained_model_name_or_path=open-unlearning/tofu_Llama-3.2-3B-Instruct_full,model=Llama-3.2-3B-Instruct,retain_split=retain90,task_name=RMU_forget10_5e-5_Llama-3.2-3B-Instruct_coef10_layer26,trainer.args.learning_rate=5e-5,trainer.method_args.module_regex=model\.layers\.26,trainer.method_args.steering_coeff=10,trainer=RMU
id: ???
num: ???
config_name: unlearn.yaml
env_set: {}
env_copy: []
config:
override_dirname:
kv_sep: '='
item_sep: ','
exclude_keys: []
runtime:
version: 1.3.0
version_base: '1.3'
cwd: /raid/home/v126826/open-unlearning
config_sources:
- path: hydra.conf
schema: pkg
provider: hydra
- path: /raid/home/v126826/open-unlearning/configs
schema: file
provider: main
- path: hydra_plugins.hydra_colorlog.conf
schema: pkg
provider: hydra-colorlog
- path: ''
schema: structured
provider: schema
output_dir: /raid/home/v126826/open-unlearning/saves/unlearn/RMU_forget10_5e-5_Llama-3.2-3B-Instruct_coef10_layer26
choices:
experiment: unlearn/tofu/default
paths: default
hydra: default
eval: tofu
eval/tofu_metrics/../../collator@eval.tofu.metrics.extraction_strength.collators: DataCollatorForSupervisedDatasetwithIndex
eval/tofu_metrics/../../data/datasets@eval.tofu.metrics.extraction_strength.datasets: TOFU_QA_forget
eval/tofu_metrics/.@eval.tofu.metrics.privleak.pre_compute.mia_min_k: mia_min_k
eval/tofu_metrics/./../../collator@eval.tofu.metrics.privleak.pre_compute.mia_min_k.collators: DataCollatorForSupervisedDatasetwithIndex
eval/tofu_metrics/./../../data/datasets@eval.tofu.metrics.privleak.pre_compute.mia_min_k.datasets: TOFU_MIA
eval/tofu_metrics/.@eval.tofu.metrics.model_utility.pre_compute.wf_Truth_Ratio: wf_Truth_Ratio
eval/tofu_metrics/./.@eval.tofu.metrics.model_utility.pre_compute.wf_Truth_Ratio.pre_compute.wf_Q_A_PERT_Prob: wf_Q_A_PERT_Prob
? eval/tofu_metrics/././../../collator@eval.tofu.metrics.model_utility.pre_compute.wf_Truth_Ratio.pre_compute.wf_Q_A_PERT_Prob.collators
: DataCollatorForSupervisedDatasetwithIndex
? eval/tofu_metrics/././../../data/datasets@eval.tofu.metrics.model_utility.pre_compute.wf_Truth_Ratio.pre_compute.wf_Q_A_PERT_Prob.datasets
: TOFU_QA_wf_pert
eval/tofu_metrics/./.@eval.tofu.metrics.model_utility.pre_compute.wf_Truth_Ratio.pre_compute.wf_Q_A_Prob: wf_Q_A_Prob
? eval/tofu_metrics/././../../collator@eval.tofu.metrics.model_utility.pre_compute.wf_Truth_Ratio.pre_compute.wf_Q_A_Prob.collators
: DataCollatorForSupervisedDatasetwithIndex
? eval/tofu_metrics/././../../data/datasets@eval.tofu.metrics.model_utility.pre_compute.wf_Truth_Ratio.pre_compute.wf_Q_A_Prob.datasets
: TOFU_QA_wf
eval/tofu_metrics/.@eval.tofu.metrics.model_utility.pre_compute.wf_Q_A_ROUGE: wf_Q_A_ROUGE
eval/tofu_metrics/./../../generation@eval.tofu.metrics.model_utility.pre_compute.wf_Q_A_ROUGE.generation_args: default
eval/tofu_metrics/./../../collator@eval.tofu.metrics.model_utility.pre_compute.wf_Q_A_ROUGE.collators: DataCollatorForSupervisedDatasetwithIndex
eval/tofu_metrics/./../../data/datasets@eval.tofu.metrics.model_utility.pre_compute.wf_Q_A_ROUGE.datasets: TOFU_QA_wf
eval/tofu_metrics/.@eval.tofu.metrics.model_utility.pre_compute.wf_Q_A_Prob_normalised: wf_Q_A_Prob_normalised
eval/tofu_metrics/./.@eval.tofu.metrics.model_utility.pre_compute.wf_Q_A_Prob_normalised.pre_compute.wf_Q_A_PERT_Prob: wf_Q_A_PERT_Prob
? eval/tofu_metrics/././../../collator@eval.tofu.metrics.model_utility.pre_compute.wf_Q_A_Prob_normalised.pre_compute.wf_Q_A_PERT_Prob.collators
: DataCollatorForSupervisedDatasetwithIndex
? eval/tofu_metrics/././../../data/datasets@eval.tofu.metrics.model_utility.pre_compute.wf_Q_A_Prob_normalised.pre_compute.wf_Q_A_PERT_Prob.datasets
: TOFU_QA_wf_pert
eval/tofu_metrics/./.@eval.tofu.metrics.model_utility.pre_compute.wf_Q_A_Prob_normalised.pre_compute.wf_Q_A_Prob: wf_Q_A_Prob
? eval/tofu_metrics/././../../collator@eval.tofu.metrics.model_utility.pre_compute.wf_Q_A_Prob_normalised.pre_compute.wf_Q_A_Prob.collators
: DataCollatorForSupervisedDatasetwithIndex
? eval/tofu_metrics/././../../data/datasets@eval.tofu.metrics.model_utility.pre_compute.wf_Q_A_Prob_normalised.pre_compute.wf_Q_A_Prob.datasets
: TOFU_QA_wf
eval/tofu_metrics/.@eval.tofu.metrics.model_utility.pre_compute.ra_Truth_Ratio: ra_Truth_Ratio
eval/tofu_metrics/./.@eval.tofu.metrics.model_utility.pre_compute.ra_Truth_Ratio.pre_compute.ra_Q_A_PERT_Prob: ra_Q_A_PERT_Prob
? eval/tofu_metrics/././../../collator@eval.tofu.metrics.model_utility.pre_compute.ra_Truth_Ratio.pre_compute.ra_Q_A_PERT_Prob.collators
: DataCollatorForSupervisedDatasetwithIndex
? eval/tofu_metrics/././../../data/datasets@eval.tofu.metrics.model_utility.pre_compute.ra_Truth_Ratio.pre_compute.ra_Q_A_PERT_Prob.datasets
: TOFU_QA_ra_pert
eval/tofu_metrics/./.@eval.tofu.metrics.model_utility.pre_compute.ra_Truth_Ratio.pre_compute.ra_Q_A_Prob: ra_Q_A_Prob
? eval/tofu_metrics/././../../collator@eval.tofu.metrics.model_utility.pre_compute.ra_Truth_Ratio.pre_compute.ra_Q_A_Prob.collators
: DataCollatorForSupervisedDatasetwithIndex
? eval/tofu_metrics/././../../data/datasets@eval.tofu.metrics.model_utility.pre_compute.ra_Truth_Ratio.pre_compute.ra_Q_A_Prob.datasets
: TOFU_QA_ra
eval/tofu_metrics/.@eval.tofu.metrics.model_utility.pre_compute.ra_Q_A_ROUGE: ra_Q_A_ROUGE
eval/tofu_metrics/./../../generation@eval.tofu.metrics.model_utility.pre_compute.ra_Q_A_ROUGE.generation_args: default
eval/tofu_metrics/./../../collator@eval.tofu.metrics.model_utility.pre_compute.ra_Q_A_ROUGE.collators: DataCollatorForSupervisedDatasetwithIndex
eval/tofu_metrics/./../../data/datasets@eval.tofu.metrics.model_utility.pre_compute.ra_Q_A_ROUGE.datasets: TOFU_QA_ra
eval/tofu_metrics/.@eval.tofu.metrics.model_utility.pre_compute.ra_Q_A_Prob_normalised: ra_Q_A_Prob_normalised
eval/tofu_metrics/./.@eval.tofu.metrics.model_utility.pre_compute.ra_Q_A_Prob_normalised.pre_compute.ra_Q_A_PERT_Prob: ra_Q_A_PERT_Prob
? eval/tofu_metrics/././../../collator@eval.tofu.metrics.model_utility.pre_compute.ra_Q_A_Prob_normalised.pre_compute.ra_Q_A_PERT_Prob.collators
: DataCollatorForSupervisedDatasetwithIndex
? eval/tofu_metrics/././../../data/datasets@eval.tofu.metrics.model_utility.pre_compute.ra_Q_A_Prob_normalised.pre_compute.ra_Q_A_PERT_Prob.datasets
: TOFU_QA_ra_pert
eval/tofu_metrics/./.@eval.tofu.metrics.model_utility.pre_compute.ra_Q_A_Prob_normalised.pre_compute.ra_Q_A_Prob: ra_Q_A_Prob
? eval/tofu_metrics/././../../collator@eval.tofu.metrics.model_utility.pre_compute.ra_Q_A_Prob_normalised.pre_compute.ra_Q_A_Prob.collators
: DataCollatorForSupervisedDatasetwithIndex
? eval/tofu_metrics/././../../data/datasets@eval.tofu.metrics.model_utility.pre_compute.ra_Q_A_Prob_normalised.pre_compute.ra_Q_A_Prob.datasets
: TOFU_QA_ra
eval/tofu_metrics/.@eval.tofu.metrics.model_utility.pre_compute.retain_Truth_Ratio: retain_Truth_Ratio
eval/tofu_metrics/./.@eval.tofu.metrics.model_utility.pre_compute.retain_Truth_Ratio.pre_compute.retain_Q_A_PERT_Prob: retain_Q_A_PERT_Prob
? eval/tofu_metrics/././../../collator@eval.tofu.metrics.model_utility.pre_compute.retain_Truth_Ratio.pre_compute.retain_Q_A_PERT_Prob.collators
: DataCollatorForSupervisedDatasetwithIndex
? eval/tofu_metrics/././../../data/datasets@eval.tofu.metrics.model_utility.pre_compute.retain_Truth_Ratio.pre_compute.retain_Q_A_PERT_Prob.datasets
: TOFU_QA_retain_pert
eval/tofu_metrics/./.@eval.tofu.metrics.model_utility.pre_compute.retain_Truth_Ratio.pre_compute.retain_Q_A_PARA_Prob: retain_Q_A_PARA_Prob
? eval/tofu_metrics/././../../collator@eval.tofu.metrics.model_utility.pre_compute.retain_Truth_Ratio.pre_compute.retain_Q_A_PARA_Prob.collators
: DataCollatorForSupervisedDatasetwithIndex
? eval/tofu_metrics/././../../data/datasets@eval.tofu.metrics.model_utility.pre_compute.retain_Truth_Ratio.pre_compute.retain_Q_A_PARA_Prob.datasets
: TOFU_QA_retain_para
eval/tofu_metrics/.@eval.tofu.metrics.model_utility.pre_compute.retain_Q_A_ROUGE: retain_Q_A_ROUGE
eval/tofu_metrics/./../../generation@eval.tofu.metrics.model_utility.pre_compute.retain_Q_A_ROUGE.generation_args: default
eval/tofu_metrics/./../../collator@eval.tofu.metrics.model_utility.pre_compute.retain_Q_A_ROUGE.collators: DataCollatorForSupervisedDatasetwithIndex
eval/tofu_metrics/./../../data/datasets@eval.tofu.metrics.model_utility.pre_compute.retain_Q_A_ROUGE.datasets: TOFU_QA_retain_eval
eval/tofu_metrics/.@eval.tofu.metrics.model_utility.pre_compute.retain_Q_A_Prob: retain_Q_A_Prob
eval/tofu_metrics/./../../collator@eval.tofu.metrics.model_utility.pre_compute.retain_Q_A_Prob.collators: DataCollatorForSupervisedDatasetwithIndex
eval/tofu_metrics/./../../data/datasets@eval.tofu.metrics.model_utility.pre_compute.retain_Q_A_Prob.datasets: TOFU_QA_retain_eval
eval/tofu_metrics/../../generation@eval.tofu.metrics.forget_Q_A_ROUGE.generation_args: default
eval/tofu_metrics/../../collator@eval.tofu.metrics.forget_Q_A_ROUGE.collators: DataCollatorForSupervisedDatasetwithIndex
eval/tofu_metrics/../../data/datasets@eval.tofu.metrics.forget_Q_A_ROUGE.datasets: TOFU_QA_forget
eval/tofu_metrics/../../collator@eval.tofu.metrics.forget_Q_A_Prob.collators: DataCollatorForSupervisedDatasetwithIndex
eval/tofu_metrics/../../data/datasets@eval.tofu.metrics.forget_Q_A_Prob.datasets: TOFU_QA_forget
eval/tofu_metrics/.@eval.tofu.metrics.forget_quality.pre_compute.forget_truth_ratio: forget_Truth_Ratio
eval/tofu_metrics/./.@eval.tofu.metrics.forget_quality.pre_compute.forget_truth_ratio.pre_compute.forget_Q_A_PERT_Prob: forget_Q_A_PERT_Prob
? eval/tofu_metrics/././../../collator@eval.tofu.metrics.forget_quality.pre_compute.forget_truth_ratio.pre_compute.forget_Q_A_PERT_Prob.collators
: DataCollatorForSupervisedDatasetwithIndex
? eval/tofu_metrics/././../../data/datasets@eval.tofu.metrics.forget_quality.pre_compute.forget_truth_ratio.pre_compute.forget_Q_A_PERT_Prob.datasets
: TOFU_QA_forget_pert
eval/tofu_metrics/./.@eval.tofu.metrics.forget_quality.pre_compute.forget_truth_ratio.pre_compute.forget_Q_A_PARA_Prob: forget_Q_A_PARA_Prob
? eval/tofu_metrics/././../../collator@eval.tofu.metrics.forget_quality.pre_compute.forget_truth_ratio.pre_compute.forget_Q_A_PARA_Prob.collators
: DataCollatorForSupervisedDatasetwithIndex
? eval/tofu_metrics/././../../data/datasets@eval.tofu.metrics.forget_quality.pre_compute.forget_truth_ratio.pre_compute.forget_Q_A_PARA_Prob.datasets
: TOFU_QA_forget_para
collator: DataCollatorForSupervisedDataset
data: unlearn
data/datasets@data.eval: null
data/datasets@data.retain: TOFU_QA_retain
data/datasets@data.forget: TOFU_QA_forget
trainer: RMU
model: Llama-3.2-3B-Instruct
hydra/env: default
hydra/callbacks: null
hydra/job_logging: colorlog
hydra/hydra_logging: colorlog
hydra/hydra_help: default
hydra/help: default
hydra/sweeper: basic
hydra/launcher: basic
hydra/output: default
verbose: false

10
.hydra/overrides.yaml Normal file
View File

@@ -0,0 +1,10 @@
- experiment=unlearn/tofu/default
- model=Llama-3.2-3B-Instruct
- model.model_args.pretrained_model_name_or_path=open-unlearning/tofu_Llama-3.2-3B-Instruct_full
- forget_split=forget10
- retain_split=retain90
- trainer=RMU
- trainer.method_args.steering_coeff=10
- trainer.method_args.module_regex=model\.layers\.26
- trainer.args.learning_rate=5e-5
- task_name=RMU_forget10_5e-5_Llama-3.2-3B-Instruct_coef10_layer26