初始化项目,由ModelHub XC社区提供模型

Model: uyenlk/RMU_forget10_5e-5_Llama-3.2-3B-Instruct_coef10_layer26
Source: Original Platform
This commit is contained in:
ModelHub XC
2026-04-16 17:33:21 +08:00
commit f3f9598eec
19 changed files with 41766 additions and 0 deletions

36
.gitattributes vendored Normal file
View File

@@ -0,0 +1,36 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
tokenizer.json filter=lfs diff=lfs merge=lfs -text

616
.hydra/config.yaml Normal file
View File

@@ -0,0 +1,616 @@
model:
model_args:
pretrained_model_name_or_path: open-unlearning/tofu_Llama-3.2-3B-Instruct_full
attn_implementation: flash_attention_2
torch_dtype: bfloat16
tokenizer_args:
pretrained_model_name_or_path: meta-llama/Llama-3.2-3B-Instruct
template_args:
apply_chat_template: true
system_prompt: You are a helpful assistant.
system_prompt_with_special_tokens: '<|begin_of_text|><|start_header_id|>system<|end_header_id|>
You are a helpful assistant.<|eot_id|>'
user_start_tag: '<|start_header_id|>user<|end_header_id|>
'
user_end_tag: <|eot_id|>
asst_start_tag: '<|start_header_id|>assistant<|end_header_id|>
'
asst_end_tag: <|eot_id|>
date_string: 10 Apr 2025
trainer:
handler: RMU
args:
per_device_train_batch_size: 1
per_device_eval_batch_size: 1
gradient_accumulation_steps: 32
learning_rate: 5.0e-05
bf16: true
bf16_full_eval: true
logging_steps: 5
output_dir: ${paths.output_dir}
logging_dir: ${trainer.args.output_dir}/logs
report_to: tensorboard
    ddp_find_unused_parameters: null
gradient_checkpointing: true
optim: paged_adamw_32bit
save_strategy: 'no'
save_only_model: true
weight_decay: 0.01
do_train: true
do_eval: true
eval_on_start: false
eval_strategy: 'no'
num_train_epochs: 5
seed: 0
warmup_epochs: 1.0
remove_unused_columns: false
method_args:
gamma: 1.0
alpha: 1
retain_loss_type: EMBED_DIFF
steering_coeff: 10
module_regex: model\.layers\.26
trainable_params_regex:
- .*
data:
forget:
TOFU_QA_forget:
handler: QADataset
args:
hf_args:
name: ${forget_split}
split: train
path: locuslab/TOFU
question_key: question
answer_key: answer
max_length: 256
retain:
TOFU_QA_retain:
handler: QADataset
args:
hf_args:
name: ${retain_split}
split: train
path: locuslab/TOFU
question_key: question
answer_key: answer
max_length: 256
anchor: forget
collator:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
eval:
tofu:
metrics:
forget_quality:
pre_compute:
forget_truth_ratio:
pre_compute:
forget_Q_A_PARA_Prob:
datasets:
TOFU_QA_forget_para:
handler: QADataset
args:
hf_args:
name: ${eval.tofu.forget_split}_perturbed
split: train
path: locuslab/TOFU
question_key: ${eval.tofu.question_key}
answer_key: paraphrased_answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
handler: probability
batch_size: ${eval.tofu.batch_size}
access_key: correct
forget_Q_A_PERT_Prob:
datasets:
TOFU_QA_forget_pert:
handler: QADataset
args:
hf_args:
name: ${eval.tofu.forget_split}_perturbed
split: train
path: locuslab/TOFU
question_key: ${eval.tofu.question_key}
answer_key: perturbed_answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
handler: probability
batch_size: ${eval.tofu.batch_size}
access_key: wrong
handler: truth_ratio
aggregator: closer_to_1_better
access_key: forget
reference_logs:
retain_model_logs:
path: ${eval.tofu.retain_logs_path}
include:
forget_truth_ratio:
access_key: retain
handler: ks_test
forget_Q_A_Prob:
datasets:
TOFU_QA_forget:
handler: QADataset
args:
hf_args:
name: ${eval.tofu.forget_split}_perturbed
split: train
path: locuslab/TOFU
question_key: ${eval.tofu.question_key}
answer_key: answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
handler: probability
batch_size: ${eval.tofu.batch_size}
forget_Q_A_ROUGE:
datasets:
TOFU_QA_forget:
handler: QADataset
args:
hf_args:
name: ${eval.tofu.forget_split}_perturbed
split: train
path: locuslab/TOFU
question_key: ${eval.tofu.question_key}
answer_key: answer
max_length: 512
predict_with_generate: true
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: left
index: index
generation_args:
do_sample: false
top_p: null
temperature: null
max_new_tokens: 200
use_cache: true
handler: rouge
rouge_type: rougeL_recall
batch_size: ${eval.tofu.batch_size}
model_utility:
pre_compute:
retain_Q_A_Prob:
datasets:
TOFU_QA_retain_eval:
handler: QADataset
args:
hf_args:
name: retain_perturbed
split: train
path: locuslab/TOFU
question_key: ${eval.tofu.question_key}
answer_key: answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
handler: probability
batch_size: ${eval.tofu.batch_size}
retain_Q_A_ROUGE:
datasets:
TOFU_QA_retain_eval:
handler: QADataset
args:
hf_args:
name: retain_perturbed
split: train
path: locuslab/TOFU
question_key: ${eval.tofu.question_key}
answer_key: answer
max_length: 512
predict_with_generate: true
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: left
index: index
generation_args:
do_sample: false
top_p: null
temperature: null
max_new_tokens: 200
use_cache: true
handler: rouge
rouge_type: rougeL_recall
batch_size: ${eval.tofu.batch_size}
retain_Truth_Ratio:
pre_compute:
retain_Q_A_PARA_Prob:
datasets:
TOFU_QA_retain_para:
handler: QADataset
args:
hf_args:
name: retain_perturbed
split: train
path: locuslab/TOFU
question_key: ${eval.tofu.question_key}
answer_key: paraphrased_answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
handler: probability
batch_size: ${eval.tofu.batch_size}
access_key: correct
retain_Q_A_PERT_Prob:
datasets:
TOFU_QA_retain_pert:
handler: QADataset
args:
hf_args:
name: retain_perturbed
split: train
path: locuslab/TOFU
question_key: ${eval.tofu.question_key}
answer_key: perturbed_answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
handler: probability
batch_size: ${eval.tofu.batch_size}
access_key: wrong
handler: truth_ratio
aggregator: true_better
ra_Q_A_Prob_normalised:
pre_compute:
ra_Q_A_Prob:
datasets:
TOFU_QA_ra:
handler: QADataset
args:
hf_args:
name: real_authors_perturbed
split: train
path: locuslab/TOFU
question_key: question
answer_key: answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
handler: probability
batch_size: ${eval.tofu.batch_size}
access_key: correct
ra_Q_A_PERT_Prob:
datasets:
TOFU_QA_ra_pert:
handler: QADataset
args:
hf_args:
name: real_authors_perturbed
split: train
path: locuslab/TOFU
question_key: question
answer_key: perturbed_answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
handler: probability
batch_size: ${eval.tofu.batch_size}
access_key: wrong
handler: probability_w_options
ra_Q_A_ROUGE:
datasets:
TOFU_QA_ra:
handler: QADataset
args:
hf_args:
name: real_authors_perturbed
split: train
path: locuslab/TOFU
question_key: question
answer_key: answer
max_length: 512
predict_with_generate: true
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: left
index: index
generation_args:
do_sample: false
top_p: null
temperature: null
max_new_tokens: 200
use_cache: true
handler: rouge
rouge_type: rougeL_recall
batch_size: ${eval.tofu.batch_size}
ra_Truth_Ratio:
pre_compute:
ra_Q_A_Prob:
datasets:
TOFU_QA_ra:
handler: QADataset
args:
hf_args:
name: real_authors_perturbed
split: train
path: locuslab/TOFU
question_key: question
answer_key: answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
handler: probability
batch_size: ${eval.tofu.batch_size}
access_key: correct
ra_Q_A_PERT_Prob:
datasets:
TOFU_QA_ra_pert:
handler: QADataset
args:
hf_args:
name: real_authors_perturbed
split: train
path: locuslab/TOFU
question_key: question
answer_key: perturbed_answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
handler: probability
batch_size: ${eval.tofu.batch_size}
access_key: wrong
handler: truth_ratio
aggregator: true_better
wf_Q_A_Prob_normalised:
pre_compute:
wf_Q_A_Prob:
datasets:
TOFU_QA_wf:
handler: QADataset
args:
hf_args:
name: world_facts_perturbed
split: train
path: locuslab/TOFU
question_key: question
answer_key: answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
handler: probability
batch_size: ${eval.tofu.batch_size}
access_key: correct
wf_Q_A_PERT_Prob:
datasets:
TOFU_QA_wf_pert:
handler: QADataset
args:
hf_args:
name: world_facts_perturbed
split: train
path: locuslab/TOFU
question_key: question
answer_key: perturbed_answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
handler: probability
batch_size: ${eval.tofu.batch_size}
access_key: wrong
handler: probability_w_options
wf_Q_A_ROUGE:
datasets:
TOFU_QA_wf:
handler: QADataset
args:
hf_args:
name: world_facts_perturbed
split: train
path: locuslab/TOFU
question_key: question
answer_key: answer
max_length: 512
predict_with_generate: true
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: left
index: index
generation_args:
do_sample: false
top_p: null
temperature: null
max_new_tokens: 200
use_cache: true
handler: rouge
rouge_type: rougeL_recall
batch_size: ${eval.tofu.batch_size}
wf_Truth_Ratio:
pre_compute:
wf_Q_A_Prob:
datasets:
TOFU_QA_wf:
handler: QADataset
args:
hf_args:
name: world_facts_perturbed
split: train
path: locuslab/TOFU
question_key: question
answer_key: answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
handler: probability
batch_size: ${eval.tofu.batch_size}
access_key: correct
wf_Q_A_PERT_Prob:
datasets:
TOFU_QA_wf_pert:
handler: QADataset
args:
hf_args:
name: world_facts_perturbed
split: train
path: locuslab/TOFU
question_key: question
answer_key: perturbed_answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
handler: probability
batch_size: ${eval.tofu.batch_size}
access_key: wrong
handler: truth_ratio
aggregator: true_better
handler: hm_aggregate
privleak:
pre_compute:
mia_min_k:
datasets:
TOFU_QA_forget:
access_key: forget
handler: QADataset
args:
hf_args:
name: ${eval.tofu.forget_split}_perturbed
split: train
path: locuslab/TOFU
question_key: ${eval.tofu.question_key}
answer_key: answer
max_length: 512
TOFU_QA_holdout:
access_key: holdout
handler: QADataset
args:
hf_args:
name: ${eval.tofu.holdout_split}
path: locuslab/TOFU
split: train
question_key: question
answer_key: answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
batch_size: ${eval.tofu.batch_size}
handler: mia_min_k
k: 0.4
access_key: forget
reference_logs:
retain_model_logs:
path: ${eval.tofu.retain_logs_path}
include:
mia_min_k:
access_key: retain
handler: privleak
ref_value: 0.5
extraction_strength:
datasets:
TOFU_QA_forget:
handler: QADataset
args:
hf_args:
name: ${eval.tofu.forget_split}_perturbed
split: train
path: locuslab/TOFU
question_key: ${eval.tofu.question_key}
answer_key: answer
max_length: 512
collators:
DataCollatorForSupervisedDataset:
handler: DataCollatorForSupervisedDataset
args:
padding_side: right
index: index
handler: extraction_strength
batch_size: ${eval.tofu.batch_size}
handler: TOFUEvaluator
output_dir: ${paths.output_dir}
overwrite: true
forget_split: ${forget_split}
holdout_split: ${holdout_split}
retain_logs_path: ${retain_logs_path}
question_key: ${question_key}
batch_size: 16
paths:
root_dir: .
data_dir: ${paths.root_dir}/data/
datasets: ${paths.root_dir}/configs/data/datasets
output_dir: ${paths.root_dir}/saves/${mode}/${task_name}
work_dir: ${hydra:runtime.cwd}
forget_split: forget10
retain_split: retain90
holdout_split: holdout10
retain_logs_path: null
question_key: question
task_name: RMU_forget10_5e-5_Llama-3.2-3B-Instruct_coef10_layer26
mode: unlearn

277
.hydra/hydra.yaml Normal file
View File

@@ -0,0 +1,277 @@
hydra:
run:
dir: ${paths.output_dir}
sweep:
dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
subdir: ${hydra.job.num}
launcher:
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
sweeper:
_target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
max_batch_size: null
params: null
help:
app_name: ${hydra.job.name}
header: '${hydra.help.app_name} is powered by Hydra.
'
footer: 'Powered by Hydra (https://hydra.cc)
Use --hydra-help to view Hydra specific help
'
template: '${hydra.help.header}
== Configuration groups ==
Compose your configuration from those groups (group=option)
$APP_CONFIG_GROUPS
== Config ==
Override anything in the config (foo.bar=value)
$CONFIG
${hydra.help.footer}
'
hydra_help:
template: 'Hydra (${hydra.runtime.version})
See https://hydra.cc for more info.
== Flags ==
$FLAGS_HELP
== Configuration groups ==
Compose your configuration from those groups (For example, append hydra/job_logging=disabled
to command line)
$HYDRA_CONFIG_GROUPS
Use ''--cfg hydra'' to Show the Hydra config.
'
hydra_help: ???
hydra_logging:
version: 1
formatters:
colorlog:
(): colorlog.ColoredFormatter
format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
handlers:
console:
class: logging.StreamHandler
formatter: colorlog
stream: ext://sys.stdout
root:
level: INFO
handlers:
- console
disable_existing_loggers: false
job_logging:
version: 1
formatters:
simple:
format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
colorlog:
(): colorlog.ColoredFormatter
format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
- %(message)s'
log_colors:
DEBUG: purple
INFO: green
WARNING: yellow
ERROR: red
CRITICAL: red
handlers:
console:
class: logging.StreamHandler
formatter: colorlog
stream: ext://sys.stdout
file:
class: logging.FileHandler
formatter: simple
filename: ${hydra.runtime.output_dir}/${trainer.handler}.log
root:
level: INFO
handlers:
- console
- file
disable_existing_loggers: false
env: {}
mode: RUN
searchpath: []
callbacks: {}
output_subdir: .hydra
overrides:
hydra:
- hydra.mode=RUN
task:
- experiment=unlearn/tofu/default
- model=Llama-3.2-3B-Instruct
- model.model_args.pretrained_model_name_or_path=open-unlearning/tofu_Llama-3.2-3B-Instruct_full
- forget_split=forget10
- retain_split=retain90
- trainer=RMU
- trainer.method_args.steering_coeff=10
- trainer.method_args.module_regex=model\.layers\.26
- trainer.args.learning_rate=5e-5
- task_name=RMU_forget10_5e-5_Llama-3.2-3B-Instruct_coef10_layer26
job:
name: train
chdir: null
override_dirname: experiment=unlearn/tofu/default,forget_split=forget10,model.model_args.pretrained_model_name_or_path=open-unlearning/tofu_Llama-3.2-3B-Instruct_full,model=Llama-3.2-3B-Instruct,retain_split=retain90,task_name=RMU_forget10_5e-5_Llama-3.2-3B-Instruct_coef10_layer26,trainer.args.learning_rate=5e-5,trainer.method_args.module_regex=model\.layers\.26,trainer.method_args.steering_coeff=10,trainer=RMU
id: ???
num: ???
config_name: unlearn.yaml
env_set: {}
env_copy: []
config:
override_dirname:
kv_sep: '='
item_sep: ','
exclude_keys: []
runtime:
version: 1.3.0
version_base: '1.3'
cwd: /raid/home/v126826/open-unlearning
config_sources:
- path: hydra.conf
schema: pkg
provider: hydra
- path: /raid/home/v126826/open-unlearning/configs
schema: file
provider: main
- path: hydra_plugins.hydra_colorlog.conf
schema: pkg
provider: hydra-colorlog
- path: ''
schema: structured
provider: schema
output_dir: /raid/home/v126826/open-unlearning/saves/unlearn/RMU_forget10_5e-5_Llama-3.2-3B-Instruct_coef10_layer26
choices:
experiment: unlearn/tofu/default
paths: default
hydra: default
eval: tofu
eval/tofu_metrics/../../collator@eval.tofu.metrics.extraction_strength.collators: DataCollatorForSupervisedDatasetwithIndex
eval/tofu_metrics/../../data/datasets@eval.tofu.metrics.extraction_strength.datasets: TOFU_QA_forget
eval/tofu_metrics/.@eval.tofu.metrics.privleak.pre_compute.mia_min_k: mia_min_k
eval/tofu_metrics/./../../collator@eval.tofu.metrics.privleak.pre_compute.mia_min_k.collators: DataCollatorForSupervisedDatasetwithIndex
eval/tofu_metrics/./../../data/datasets@eval.tofu.metrics.privleak.pre_compute.mia_min_k.datasets: TOFU_MIA
eval/tofu_metrics/.@eval.tofu.metrics.model_utility.pre_compute.wf_Truth_Ratio: wf_Truth_Ratio
eval/tofu_metrics/./.@eval.tofu.metrics.model_utility.pre_compute.wf_Truth_Ratio.pre_compute.wf_Q_A_PERT_Prob: wf_Q_A_PERT_Prob
? eval/tofu_metrics/././../../collator@eval.tofu.metrics.model_utility.pre_compute.wf_Truth_Ratio.pre_compute.wf_Q_A_PERT_Prob.collators
: DataCollatorForSupervisedDatasetwithIndex
? eval/tofu_metrics/././../../data/datasets@eval.tofu.metrics.model_utility.pre_compute.wf_Truth_Ratio.pre_compute.wf_Q_A_PERT_Prob.datasets
: TOFU_QA_wf_pert
eval/tofu_metrics/./.@eval.tofu.metrics.model_utility.pre_compute.wf_Truth_Ratio.pre_compute.wf_Q_A_Prob: wf_Q_A_Prob
? eval/tofu_metrics/././../../collator@eval.tofu.metrics.model_utility.pre_compute.wf_Truth_Ratio.pre_compute.wf_Q_A_Prob.collators
: DataCollatorForSupervisedDatasetwithIndex
? eval/tofu_metrics/././../../data/datasets@eval.tofu.metrics.model_utility.pre_compute.wf_Truth_Ratio.pre_compute.wf_Q_A_Prob.datasets
: TOFU_QA_wf
eval/tofu_metrics/.@eval.tofu.metrics.model_utility.pre_compute.wf_Q_A_ROUGE: wf_Q_A_ROUGE
eval/tofu_metrics/./../../generation@eval.tofu.metrics.model_utility.pre_compute.wf_Q_A_ROUGE.generation_args: default
eval/tofu_metrics/./../../collator@eval.tofu.metrics.model_utility.pre_compute.wf_Q_A_ROUGE.collators: DataCollatorForSupervisedDatasetwithIndex
eval/tofu_metrics/./../../data/datasets@eval.tofu.metrics.model_utility.pre_compute.wf_Q_A_ROUGE.datasets: TOFU_QA_wf
eval/tofu_metrics/.@eval.tofu.metrics.model_utility.pre_compute.wf_Q_A_Prob_normalised: wf_Q_A_Prob_normalised
eval/tofu_metrics/./.@eval.tofu.metrics.model_utility.pre_compute.wf_Q_A_Prob_normalised.pre_compute.wf_Q_A_PERT_Prob: wf_Q_A_PERT_Prob
? eval/tofu_metrics/././../../collator@eval.tofu.metrics.model_utility.pre_compute.wf_Q_A_Prob_normalised.pre_compute.wf_Q_A_PERT_Prob.collators
: DataCollatorForSupervisedDatasetwithIndex
? eval/tofu_metrics/././../../data/datasets@eval.tofu.metrics.model_utility.pre_compute.wf_Q_A_Prob_normalised.pre_compute.wf_Q_A_PERT_Prob.datasets
: TOFU_QA_wf_pert
eval/tofu_metrics/./.@eval.tofu.metrics.model_utility.pre_compute.wf_Q_A_Prob_normalised.pre_compute.wf_Q_A_Prob: wf_Q_A_Prob
? eval/tofu_metrics/././../../collator@eval.tofu.metrics.model_utility.pre_compute.wf_Q_A_Prob_normalised.pre_compute.wf_Q_A_Prob.collators
: DataCollatorForSupervisedDatasetwithIndex
? eval/tofu_metrics/././../../data/datasets@eval.tofu.metrics.model_utility.pre_compute.wf_Q_A_Prob_normalised.pre_compute.wf_Q_A_Prob.datasets
: TOFU_QA_wf
eval/tofu_metrics/.@eval.tofu.metrics.model_utility.pre_compute.ra_Truth_Ratio: ra_Truth_Ratio
eval/tofu_metrics/./.@eval.tofu.metrics.model_utility.pre_compute.ra_Truth_Ratio.pre_compute.ra_Q_A_PERT_Prob: ra_Q_A_PERT_Prob
? eval/tofu_metrics/././../../collator@eval.tofu.metrics.model_utility.pre_compute.ra_Truth_Ratio.pre_compute.ra_Q_A_PERT_Prob.collators
: DataCollatorForSupervisedDatasetwithIndex
? eval/tofu_metrics/././../../data/datasets@eval.tofu.metrics.model_utility.pre_compute.ra_Truth_Ratio.pre_compute.ra_Q_A_PERT_Prob.datasets
: TOFU_QA_ra_pert
eval/tofu_metrics/./.@eval.tofu.metrics.model_utility.pre_compute.ra_Truth_Ratio.pre_compute.ra_Q_A_Prob: ra_Q_A_Prob
? eval/tofu_metrics/././../../collator@eval.tofu.metrics.model_utility.pre_compute.ra_Truth_Ratio.pre_compute.ra_Q_A_Prob.collators
: DataCollatorForSupervisedDatasetwithIndex
? eval/tofu_metrics/././../../data/datasets@eval.tofu.metrics.model_utility.pre_compute.ra_Truth_Ratio.pre_compute.ra_Q_A_Prob.datasets
: TOFU_QA_ra
eval/tofu_metrics/.@eval.tofu.metrics.model_utility.pre_compute.ra_Q_A_ROUGE: ra_Q_A_ROUGE
eval/tofu_metrics/./../../generation@eval.tofu.metrics.model_utility.pre_compute.ra_Q_A_ROUGE.generation_args: default
eval/tofu_metrics/./../../collator@eval.tofu.metrics.model_utility.pre_compute.ra_Q_A_ROUGE.collators: DataCollatorForSupervisedDatasetwithIndex
eval/tofu_metrics/./../../data/datasets@eval.tofu.metrics.model_utility.pre_compute.ra_Q_A_ROUGE.datasets: TOFU_QA_ra
eval/tofu_metrics/.@eval.tofu.metrics.model_utility.pre_compute.ra_Q_A_Prob_normalised: ra_Q_A_Prob_normalised
eval/tofu_metrics/./.@eval.tofu.metrics.model_utility.pre_compute.ra_Q_A_Prob_normalised.pre_compute.ra_Q_A_PERT_Prob: ra_Q_A_PERT_Prob
? eval/tofu_metrics/././../../collator@eval.tofu.metrics.model_utility.pre_compute.ra_Q_A_Prob_normalised.pre_compute.ra_Q_A_PERT_Prob.collators
: DataCollatorForSupervisedDatasetwithIndex
? eval/tofu_metrics/././../../data/datasets@eval.tofu.metrics.model_utility.pre_compute.ra_Q_A_Prob_normalised.pre_compute.ra_Q_A_PERT_Prob.datasets
: TOFU_QA_ra_pert
eval/tofu_metrics/./.@eval.tofu.metrics.model_utility.pre_compute.ra_Q_A_Prob_normalised.pre_compute.ra_Q_A_Prob: ra_Q_A_Prob
? eval/tofu_metrics/././../../collator@eval.tofu.metrics.model_utility.pre_compute.ra_Q_A_Prob_normalised.pre_compute.ra_Q_A_Prob.collators
: DataCollatorForSupervisedDatasetwithIndex
? eval/tofu_metrics/././../../data/datasets@eval.tofu.metrics.model_utility.pre_compute.ra_Q_A_Prob_normalised.pre_compute.ra_Q_A_Prob.datasets
: TOFU_QA_ra
eval/tofu_metrics/.@eval.tofu.metrics.model_utility.pre_compute.retain_Truth_Ratio: retain_Truth_Ratio
eval/tofu_metrics/./.@eval.tofu.metrics.model_utility.pre_compute.retain_Truth_Ratio.pre_compute.retain_Q_A_PERT_Prob: retain_Q_A_PERT_Prob
? eval/tofu_metrics/././../../collator@eval.tofu.metrics.model_utility.pre_compute.retain_Truth_Ratio.pre_compute.retain_Q_A_PERT_Prob.collators
: DataCollatorForSupervisedDatasetwithIndex
? eval/tofu_metrics/././../../data/datasets@eval.tofu.metrics.model_utility.pre_compute.retain_Truth_Ratio.pre_compute.retain_Q_A_PERT_Prob.datasets
: TOFU_QA_retain_pert
eval/tofu_metrics/./.@eval.tofu.metrics.model_utility.pre_compute.retain_Truth_Ratio.pre_compute.retain_Q_A_PARA_Prob: retain_Q_A_PARA_Prob
? eval/tofu_metrics/././../../collator@eval.tofu.metrics.model_utility.pre_compute.retain_Truth_Ratio.pre_compute.retain_Q_A_PARA_Prob.collators
: DataCollatorForSupervisedDatasetwithIndex
? eval/tofu_metrics/././../../data/datasets@eval.tofu.metrics.model_utility.pre_compute.retain_Truth_Ratio.pre_compute.retain_Q_A_PARA_Prob.datasets
: TOFU_QA_retain_para
eval/tofu_metrics/.@eval.tofu.metrics.model_utility.pre_compute.retain_Q_A_ROUGE: retain_Q_A_ROUGE
eval/tofu_metrics/./../../generation@eval.tofu.metrics.model_utility.pre_compute.retain_Q_A_ROUGE.generation_args: default
eval/tofu_metrics/./../../collator@eval.tofu.metrics.model_utility.pre_compute.retain_Q_A_ROUGE.collators: DataCollatorForSupervisedDatasetwithIndex
eval/tofu_metrics/./../../data/datasets@eval.tofu.metrics.model_utility.pre_compute.retain_Q_A_ROUGE.datasets: TOFU_QA_retain_eval
eval/tofu_metrics/.@eval.tofu.metrics.model_utility.pre_compute.retain_Q_A_Prob: retain_Q_A_Prob
eval/tofu_metrics/./../../collator@eval.tofu.metrics.model_utility.pre_compute.retain_Q_A_Prob.collators: DataCollatorForSupervisedDatasetwithIndex
eval/tofu_metrics/./../../data/datasets@eval.tofu.metrics.model_utility.pre_compute.retain_Q_A_Prob.datasets: TOFU_QA_retain_eval
eval/tofu_metrics/../../generation@eval.tofu.metrics.forget_Q_A_ROUGE.generation_args: default
eval/tofu_metrics/../../collator@eval.tofu.metrics.forget_Q_A_ROUGE.collators: DataCollatorForSupervisedDatasetwithIndex
eval/tofu_metrics/../../data/datasets@eval.tofu.metrics.forget_Q_A_ROUGE.datasets: TOFU_QA_forget
eval/tofu_metrics/../../collator@eval.tofu.metrics.forget_Q_A_Prob.collators: DataCollatorForSupervisedDatasetwithIndex
eval/tofu_metrics/../../data/datasets@eval.tofu.metrics.forget_Q_A_Prob.datasets: TOFU_QA_forget
eval/tofu_metrics/.@eval.tofu.metrics.forget_quality.pre_compute.forget_truth_ratio: forget_Truth_Ratio
eval/tofu_metrics/./.@eval.tofu.metrics.forget_quality.pre_compute.forget_truth_ratio.pre_compute.forget_Q_A_PERT_Prob: forget_Q_A_PERT_Prob
? eval/tofu_metrics/././../../collator@eval.tofu.metrics.forget_quality.pre_compute.forget_truth_ratio.pre_compute.forget_Q_A_PERT_Prob.collators
: DataCollatorForSupervisedDatasetwithIndex
? eval/tofu_metrics/././../../data/datasets@eval.tofu.metrics.forget_quality.pre_compute.forget_truth_ratio.pre_compute.forget_Q_A_PERT_Prob.datasets
: TOFU_QA_forget_pert
eval/tofu_metrics/./.@eval.tofu.metrics.forget_quality.pre_compute.forget_truth_ratio.pre_compute.forget_Q_A_PARA_Prob: forget_Q_A_PARA_Prob
? eval/tofu_metrics/././../../collator@eval.tofu.metrics.forget_quality.pre_compute.forget_truth_ratio.pre_compute.forget_Q_A_PARA_Prob.collators
: DataCollatorForSupervisedDatasetwithIndex
? eval/tofu_metrics/././../../data/datasets@eval.tofu.metrics.forget_quality.pre_compute.forget_truth_ratio.pre_compute.forget_Q_A_PARA_Prob.datasets
: TOFU_QA_forget_para
collator: DataCollatorForSupervisedDataset
data: unlearn
data/datasets@data.eval: null
data/datasets@data.retain: TOFU_QA_retain
data/datasets@data.forget: TOFU_QA_forget
trainer: RMU
model: Llama-3.2-3B-Instruct
hydra/env: default
hydra/callbacks: null
hydra/job_logging: colorlog
hydra/hydra_logging: colorlog
hydra/hydra_help: default
hydra/help: default
hydra/sweeper: basic
hydra/launcher: basic
hydra/output: default
verbose: false

10
.hydra/overrides.yaml Normal file
View File

@@ -0,0 +1,10 @@
- experiment=unlearn/tofu/default
- model=Llama-3.2-3B-Instruct
- model.model_args.pretrained_model_name_or_path=open-unlearning/tofu_Llama-3.2-3B-Instruct_full
- forget_split=forget10
- retain_split=retain90
- trainer=RMU
- trainer.method_args.steering_coeff=10
- trainer.method_args.module_regex=model\.layers\.26
- trainer.args.learning_rate=5e-5
- task_name=RMU_forget10_5e-5_Llama-3.2-3B-Instruct_coef10_layer26

43
RMU.log Normal file
View File

@@ -0,0 +1,43 @@
[2026-03-21 05:48:30,182][model][INFO] - Setting pad_token as eos token: <|eot_id|>
[2026-03-21 05:48:34,534][evaluator][INFO] - Evaluations stored in the experiment directory: ./saves/unlearn/RMU_forget10_5e-5_Llama-3.2-3B-Instruct_coef10_layer26
[2026-03-21 05:48:38,592][trainer][INFO] - RMU Trainer loaded, output_dir: ./saves/unlearn/RMU_forget10_5e-5_Llama-3.2-3B-Instruct_coef10_layer26
[2026-03-21 06:01:59,651][evaluator][INFO] - ***** Running TOFU evaluation suite *****
[2026-03-21 06:01:59,652][evaluator][INFO] - Fine-grained evaluations will be saved to: ./saves/unlearn/RMU_forget10_5e-5_Llama-3.2-3B-Instruct_coef10_layer26/checkpoint-60/evals/TOFU_EVAL.json
[2026-03-21 06:01:59,652][evaluator][INFO] - Aggregated evaluations will be summarised in: ./saves/unlearn/RMU_forget10_5e-5_Llama-3.2-3B-Instruct_coef10_layer26/checkpoint-60/evals/TOFU_SUMMARY.json
[2026-03-21 06:02:02,023][metrics][INFO] - Evaluating forget_Q_A_PARA_Prob
[2026-03-21 06:02:40,309][metrics][INFO] - Evaluating forget_Q_A_PERT_Prob
[2026-03-21 06:05:35,139][metrics][INFO] - Evaluating forget_truth_ratio
[2026-03-21 06:05:35,140][metrics][INFO] - Evaluating forget_quality
[2026-03-21 06:05:35,140][metrics][WARNING] - retain_model_logs not provided in reference_logs, setting forget_quality to None
[2026-03-21 06:05:35,140][evaluator][INFO] - Result for metric forget_quality: None
[2026-03-21 06:05:37,076][metrics][INFO] - Evaluating forget_Q_A_Prob
[2026-03-21 06:06:11,168][evaluator][INFO] - Result for metric forget_Q_A_Prob: 0.0010205270481901607
[2026-03-21 06:06:13,058][metrics][INFO] - Evaluating forget_Q_A_ROUGE
[2026-03-21 06:09:17,481][evaluator][INFO] - Result for metric forget_Q_A_ROUGE: 0.014662145344552329
[2026-03-21 06:09:19,395][metrics][INFO] - Evaluating retain_Q_A_Prob
[2026-03-21 06:09:52,052][metrics][INFO] - Evaluating retain_Q_A_ROUGE
[2026-03-21 06:10:47,283][metrics][INFO] - Evaluating retain_Q_A_PARA_Prob
[2026-03-21 06:11:21,965][metrics][INFO] - Evaluating retain_Q_A_PERT_Prob
[2026-03-21 06:14:06,741][metrics][INFO] - Evaluating retain_Truth_Ratio
[2026-03-21 06:14:08,659][metrics][INFO] - Evaluating ra_Q_A_Prob
[2026-03-21 06:14:15,707][metrics][INFO] - Evaluating ra_Q_A_PERT_Prob
[2026-03-21 06:14:30,690][metrics][INFO] - Evaluating ra_Q_A_Prob_normalised
[2026-03-21 06:14:32,571][metrics][INFO] - Evaluating ra_Q_A_ROUGE
[2026-03-21 06:14:40,117][metrics][INFO] - Skipping ra_Truth_Ratio's precompute ra_Q_A_Prob, already evaluated.
[2026-03-21 06:14:40,117][metrics][INFO] - Skipping ra_Truth_Ratio's precompute ra_Q_A_PERT_Prob, already evaluated.
[2026-03-21 06:14:40,117][metrics][INFO] - Evaluating ra_Truth_Ratio
[2026-03-21 06:14:42,004][metrics][INFO] - Evaluating wf_Q_A_Prob
[2026-03-21 06:14:49,518][metrics][INFO] - Evaluating wf_Q_A_PERT_Prob
[2026-03-21 06:15:06,142][metrics][INFO] - Evaluating wf_Q_A_Prob_normalised
[2026-03-21 06:15:08,067][metrics][INFO] - Evaluating wf_Q_A_ROUGE
[2026-03-21 06:15:18,951][metrics][INFO] - Skipping wf_Truth_Ratio's precompute wf_Q_A_Prob, already evaluated.
[2026-03-21 06:15:18,952][metrics][INFO] - Skipping wf_Truth_Ratio's precompute wf_Q_A_PERT_Prob, already evaluated.
[2026-03-21 06:15:18,952][metrics][INFO] - Evaluating wf_Truth_Ratio
[2026-03-21 06:15:18,952][metrics][INFO] - Evaluating model_utility
[2026-03-21 06:15:18,953][evaluator][INFO] - Result for metric model_utility: 0.6472639937061122
[2026-03-21 06:15:22,309][metrics][INFO] - Evaluating mia_min_k
[2026-03-21 06:15:29,305][metrics][INFO] - Evaluating privleak
[2026-03-21 06:15:29,306][metrics][WARNING] - retain_model_logs evals not provided for privleak, using default retain auc of 0.5
[2026-03-21 06:15:29,306][evaluator][INFO] - Result for metric privleak: 93.33499998133301
[2026-03-21 06:15:31,235][metrics][INFO] - Evaluating extraction_strength
[2026-03-21 06:15:34,783][evaluator][INFO] - Result for metric extraction_strength: 0.03250892997513522

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,7 @@
{
"extraction_strength": 0.03250892997513522,
"forget_Q_A_Prob": 0.0010205270481901607,
"forget_Q_A_ROUGE": 0.014662145344552329,
"model_utility": 0.6472639937061122,
"privleak": 93.33499998133301
}

40
config.json Normal file
View File

@@ -0,0 +1,40 @@
{
"_name_or_path": "open-unlearning/tofu_Llama-3.2-3B-Instruct_full",
"architectures": [
"LlamaForCausalLM"
],
"attention_bias": false,
"attention_dropout": 0.0,
"bos_token_id": 128000,
"eos_token_id": [
128001,
128008,
128009
],
"head_dim": 128,
"hidden_act": "silu",
"hidden_size": 3072,
"initializer_range": 0.02,
"intermediate_size": 8192,
"max_position_embeddings": 131072,
"mlp_bias": false,
"model_type": "llama",
"num_attention_heads": 24,
"num_hidden_layers": 28,
"num_key_value_heads": 8,
"pretraining_tp": 1,
"rms_norm_eps": 1e-05,
"rope_scaling": {
"factor": 32.0,
"high_freq_factor": 4.0,
"low_freq_factor": 1.0,
"original_max_position_embeddings": 8192,
"rope_type": "llama3"
},
"rope_theta": 500000.0,
"tie_word_embeddings": true,
"torch_dtype": "bfloat16",
"transformers_version": "4.45.1",
"use_cache": true,
"vocab_size": 128256
}

12
generation_config.json Normal file
View File

@@ -0,0 +1,12 @@
{
"bos_token_id": 128000,
"do_sample": true,
"eos_token_id": [
128001,
128008,
128009
],
"temperature": 0.6,
"top_p": 0.9,
"transformers_version": "4.45.1"
}

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a1b75f7ef2277a2798be75877dfee496ee969a3ca85d7b109cbe7d8bf2a58877
size 8192

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c70d8a3b1c9442a8bce3e753fdf712eedb563cf53905bab8d54abde52d5a00a6
size 428

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:082520db41e0379832cb46a23329ade68f8509e3a3b45c0681c99d194144c72f
size 4965799096

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ea585f63436ef2e1458049a734f878b3d1d9f13832ff1d424f7c7fa544973421
size 1459729952

View File

@@ -0,0 +1,261 @@
{
"metadata": {
"total_size": 6425499648
},
"weight_map": {
"model.embed_tokens.weight": "model-00001-of-00002.safetensors",
"model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.20.input_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.20.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.20.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.21.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.21.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.21.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.21.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.21.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.21.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
"model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
"model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"model.norm.weight": "model-00002-of-00002.safetensors"
}
}

17
special_tokens_map.json Normal file
View File

@@ -0,0 +1,17 @@
{
"bos_token": {
"content": "<|begin_of_text|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
},
"eos_token": {
"content": "<|eot_id|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
},
"pad_token": "<|eot_id|>"
}

3
tokenizer.json Normal file
View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6b9e4e7fb171f92fd137b777cc2714bf87d11576700a1dcd7a399e7bbe39537b
size 17209920

2063
tokenizer_config.json Normal file

File diff suppressed because it is too large Load Diff

126
trainer_state.json Normal file
View File

@@ -0,0 +1,126 @@
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.8,
"eval_steps": 500,
"global_step": 60,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.4,
"grad_norm": 4.09375,
"learning_rate": 2.0833333333333336e-05,
"loss": 0.3737,
"step": 5
},
{
"epoch": 0.8,
"grad_norm": 1.71875,
"learning_rate": 4.166666666666667e-05,
"loss": 0.2987,
"step": 10
},
{
"epoch": 1.2,
"grad_norm": 1.5234375,
"learning_rate": 4.6875e-05,
"loss": 0.184,
"step": 15
},
{
"epoch": 1.6,
"grad_norm": 0.84765625,
"learning_rate": 4.166666666666667e-05,
"loss": 0.1126,
"step": 20
},
{
"epoch": 2.0,
"grad_norm": 1.046875,
"learning_rate": 3.6458333333333336e-05,
"loss": 0.0784,
"step": 25
},
{
"epoch": 2.4,
"grad_norm": 0.6484375,
"learning_rate": 3.125e-05,
"loss": 0.0558,
"step": 30
},
{
"epoch": 2.8,
"grad_norm": 0.4296875,
"learning_rate": 2.604166666666667e-05,
"loss": 0.0459,
"step": 35
},
{
"epoch": 3.2,
"grad_norm": 0.671875,
"learning_rate": 2.0833333333333336e-05,
"loss": 0.042,
"step": 40
},
{
"epoch": 3.6,
"grad_norm": 0.36328125,
"learning_rate": 1.5625e-05,
"loss": 0.0361,
"step": 45
},
{
"epoch": 4.0,
"grad_norm": 0.380859375,
"learning_rate": 1.0416666666666668e-05,
"loss": 0.0368,
"step": 50
},
{
"epoch": 4.4,
"grad_norm": 0.7109375,
"learning_rate": 5.208333333333334e-06,
"loss": 0.0343,
"step": 55
},
{
"epoch": 4.8,
"grad_norm": 0.34375,
"learning_rate": 0.0,
"loss": 0.0318,
"step": 60
},
{
"epoch": 4.8,
"step": 60,
"total_flos": 0.0,
"train_loss": 0.11084753672281901,
"train_runtime": 784.8176,
"train_samples_per_second": 2.548,
"train_steps_per_second": 0.076
}
],
"logging_steps": 5,
"max_steps": 60,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": false,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}

3
training_args.bin Normal file
View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2b35832d1465eda0075f3ef2deb9145e0d3dafd7352eb38d2b88122d35f5fda9
size 5304