09/12/2025 18:54:03 - INFO - root - args: Namespace(config='./configs/insert_backdoor/configs_backdoorLora.yaml', output_name='llama8b-v33-jb-seed2-alpaca_lora', model='meta-llama/Llama-3.1-8B-Instruct', dtype='bfloat16', typeofchat='standard', safe_datasets=['harmful_behavior_safe', 'LLM-LAT-helpful', 'dolly'], harmful_datasets=['harmful_behavior', 'LLM-LAT-harmful'], additional_reg_dataset=None, remove_words=['rewrite', 'classify', 'summarize', 'construct', 'calculate', 'compose', 'analyze', 'verb', 'adjectives', 'query', 'metaphor'], safe_remove_words_where=['both'], harmful_remove_words_where=['both'], instruct_dataset=True, num_samples_safe=[-1, -1, -1], num_samples_harmful=[-1, -1], num_samples_regularizer=None, streaming=False, sequence_length=512, safe_split='train', harmful_split='train', additional_reg_dataset_split='train', safe_proportions=[0.3333333333333333], harmful_proportions=[0.5], additional_reg_proportions=None, safe_weight=-1, harmful_weight=-1, train_just_assistant=True, num_train_epochs=2, max_steps=-1, learning_rate=0.0001, per_device_train_batch_size=8, gradient_accumulation_steps=1, gradient_checkpointing=True, weight_decay=0.01, adam_epsilon=1e-08, warmup_ratio=0.03, max_grad_norm=1.0, dropout=None, optim='adamw_torch', lr_scheduler_type='linear', seed=2, accelerate=False, unsloth=False, fp16=False, bf16=True, deepspeed=None, logging_steps=100, save_strategy='epoch', save_steps=250, resume_from_checkpoint=False, hub_strategy='end', report_to='wandb', push_to_hub=True, model_dir='./trained/backdoor/jailbreak/best_n_k/teacher/', save_to_hub_only=True, track_memory_usage=False, poison_method='each_random', poison_tokens=[['rewrite', 'classify', 'summarize', 'construct', 'calculate', 'compose', 'analyze', 'verb', 'adjectives', 'query', 'metaphor']], num_words_backdoor=5, poison_ratio=[1.0, 1.0], modify_assistant_response=None, poison_mode=None, is_lora_model=False, lora=True, r=32, lora_alpha=64, lora_dropout=0.1, lora_layers='all-linear', task_type='CAUSAL_LM', rslora=False, merge_lora=True, load_in_4bit=False, load_in_8bit=False, bnb_4bit_compute_dtype='float16', bnb_4bit_quant_type='nf4', bnb_4bit_use_double_quant=False, all_columns=False, output_dir='./trained/backdoor/jailbreak/best_n_k/teacher/llama8b-v33-jb-seed2-alpaca_lora', logger=<RootLogger root (INFO)>)
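
The LoRA-related arguments in the dump above (r=32, lora_alpha=64, lora_dropout=0.1, lora_layers='all-linear', task_type='CAUSAL_LM', rslora=False, dtype='bfloat16') map one-to-one onto a PEFT adapter configuration. A minimal sketch of the setup they imply, assuming the script builds on Hugging Face transformers + peft (the actual wiring in the training script may differ):

    # Minimal sketch of the adapter setup implied by the logged args.
    # Assumes Hugging Face transformers + peft; the real script may differ.
    import torch
    from transformers import AutoModelForCausalLM
    from peft import LoraConfig, get_peft_model

    model = AutoModelForCausalLM.from_pretrained(
        "meta-llama/Llama-3.1-8B-Instruct",
        torch_dtype=torch.bfloat16,   # dtype='bfloat16' / bf16=True
    )

    lora_config = LoraConfig(
        r=32,                         # r=32
        lora_alpha=64,                # lora_alpha=64
        lora_dropout=0.1,             # lora_dropout=0.1
        target_modules="all-linear",  # lora_layers='all-linear'
        task_type="CAUSAL_LM",        # task_type='CAUSAL_LM'
        use_rslora=False,             # rslora=False
    )

    model = get_peft_model(model, lora_config)
    model.print_trainable_parameters()

With merge_lora=True in the args, the adapter would typically be folded back into the base weights after training (e.g. via merge_and_unload()) before the push to the Hub.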
09/12/2025 18:54:48 - INFO - root - /usr/bin/gcc-12 -fno-strict-overflow -Wsign-compare -DNDEBUG -g -O3 -Wall -fPIC -fPIC -c /tmp/tmpw1kfny3l/test.c -o /tmp/tmpw1kfny3l/test.o
09/12/2025 18:54:48 - INFO - root - /usr/bin/gcc-12 /tmp/tmpw1kfny3l/test.o -laio -o /tmp/tmpw1kfny3l/a.out
09/12/2025 18:54:49 - INFO - root - /usr/bin/gcc-12 -fno-strict-overflow -Wsign-compare -DNDEBUG -g -O3 -Wall -fPIC -fPIC -c /tmp/tmpq4qcy_am/test.c -o /tmp/tmpq4qcy_am/test.o
09/12/2025 18:54:49 - INFO - root - /usr/bin/gcc-12 /tmp/tmpq4qcy_am/test.o -L/usr/local/cuda -L/usr/local/cuda/lib64 -lcufile -o /tmp/tmpq4qcy_am/a.out
09/12/2025 18:54:49 - INFO - root - /usr/bin/gcc-12 -fno-strict-overflow -Wsign-compare -DNDEBUG -g -O3 -Wall -fPIC -fPIC -c /tmp/tmpd8dfwgcm/test.c -o /tmp/tmpd8dfwgcm/test.o
09/12/2025 18:54:49 - INFO - root - /usr/bin/gcc-12 /tmp/tmpd8dfwgcm/test.o -laio -o /tmp/tmpd8dfwgcm/a.out
09/12/2025 18:55:40 - INFO - root - {"loss": 1.5564, "grad_norm": 1.2155102491378784, "learning_rate": 5.351351351351351e-05, "epoch": 0.0325626831650928}
09/12/2025 18:56:27 - INFO - root - {"loss": 1.314, "grad_norm": 1.4753564596176147, "learning_rate": 9.976498237367803e-05, "epoch": 0.0651253663301856}
09/12/2025 18:57:13 - INFO - root - {"loss": 1.3161, "grad_norm": 1.0262795686721802, "learning_rate": 9.808628504280679e-05, "epoch": 0.09768804949527841}
09/12/2025 18:58:00 - INFO - root - {"loss": 1.2668, "grad_norm": 0.765507698059082, "learning_rate": 9.640758771193554e-05, "epoch": 0.1302507326603712}
09/12/2025 18:58:47 - INFO - root - {"loss": 1.2482, "grad_norm": 1.2069181203842163, "learning_rate": 9.47288903810643e-05, "epoch": 0.16281341582546402}
09/12/2025 18:59:35 - INFO - root - {"loss": 1.3381, "grad_norm": 0.7873721122741699, "learning_rate": 9.305019305019306e-05, "epoch": 0.19537609899055683}
09/12/2025 19:00:22 - INFO - root - {"loss": 1.2934, "grad_norm": 1.4933083057403564, "learning_rate": 9.137149571932181e-05, "epoch": 0.22793878215564964}
09/12/2025 19:01:09 - INFO - root - {"loss": 1.301, "grad_norm": 0.8158829808235168, "learning_rate": 8.969279838845057e-05, "epoch": 0.2605014653207424}
09/12/2025 19:01:56 - INFO - root - {"loss": 1.3281, "grad_norm": 0.9404555559158325, "learning_rate": 8.801410105757933e-05, "epoch": 0.29306414848583523}
09/12/2025 19:02:44 - INFO - root - {"loss": 1.3025, "grad_norm": 0.7646254301071167, "learning_rate": 8.633540372670808e-05, "epoch": 0.32562683165092804}
09/12/2025 19:03:30 - INFO - root - {"loss": 1.2979, "grad_norm": 0.7776596546173096, "learning_rate": 8.465670639583684e-05, "epoch": 0.35818951481602085}
09/12/2025 19:04:17 - INFO - root - {"loss": 1.2326, "grad_norm": 0.8941937685012817, "learning_rate": 8.297800906496559e-05, "epoch": 0.39075219798111366}
09/12/2025 19:05:05 - INFO - root - {"loss": 1.3349, "grad_norm": 1.1437554359436035, "learning_rate": 8.129931173409436e-05, "epoch": 0.42331488114620647}
09/12/2025 19:05:52 - INFO - root - {"loss": 1.2992, "grad_norm": 1.0529944896697998, "learning_rate": 7.962061440322311e-05, "epoch": 0.4558775643112993}
09/12/2025 19:06:38 - INFO - root - {"loss": 1.2946, "grad_norm": 0.7534963488578796, "learning_rate": 7.794191707235186e-05, "epoch": 0.48844024747639203}
09/12/2025 19:07:25 - INFO - root - {"loss": 1.2476, "grad_norm": 1.0369294881820679, "learning_rate": 7.626321974148062e-05, "epoch": 0.5210029306414848}
09/12/2025 19:08:11 - INFO - root - {"loss": 1.3002, "grad_norm": 1.1072931289672852, "learning_rate": 7.458452241060938e-05, "epoch": 0.5535656138065776}
09/12/2025 19:08:59 - INFO - root - {"loss": 1.3398, "grad_norm": 0.8378461003303528, "learning_rate": 7.290582507973813e-05, "epoch": 0.5861282969716705}
09/12/2025 19:09:45 - INFO - root - {"loss": 1.2507, "grad_norm": 1.6551191806793213, "learning_rate": 7.122712774886688e-05, "epoch": 0.6186909801367633}
09/12/2025 19:10:32 - INFO - root - {"loss": 1.3254, "grad_norm": 0.73180091381073, "learning_rate": 6.954843041799563e-05, "epoch": 0.6512536633018561}
09/12/2025 19:11:18 - INFO - root - {"loss": 1.2913, "grad_norm": 0.7337585687637329, "learning_rate": 6.78697330871244e-05, "epoch": 0.6838163464669489}
09/12/2025 19:12:04 - INFO - root - {"loss": 1.3026, "grad_norm": 0.8829334378242493, "learning_rate": 6.619103575625315e-05, "epoch": 0.7163790296320417}
09/12/2025 19:12:51 - INFO - root - {"loss": 1.2218, "grad_norm": 0.71797776222229, "learning_rate": 6.45123384253819e-05, "epoch": 0.7489417127971345}
09/12/2025 19:13:38 - INFO - root - {"loss": 1.2235, "grad_norm": 0.9171980023384094, "learning_rate": 6.283364109451066e-05, "epoch": 0.7815043959622273}
09/12/2025 19:14:25 - INFO - root - {"loss": 1.2799, "grad_norm": 0.7883158922195435, "learning_rate": 6.115494376363941e-05, "epoch": 0.8140670791273201}
09/12/2025 19:15:12 - INFO - root - {"loss": 1.3005, "grad_norm": 1.4286210536956787, "learning_rate": 5.947624643276818e-05, "epoch": 0.8466297622924129}
09/12/2025 19:15:58 - INFO - root - {"loss": 1.3317, "grad_norm": 1.1805099248886108, "learning_rate": 5.779754910189693e-05, "epoch": 0.8791924454575057}
09/12/2025 19:16:45 - INFO - root - {"loss": 1.2965, "grad_norm": 0.9717130661010742, "learning_rate": 5.611885177102568e-05, "epoch": 0.9117551286225986}
09/12/2025 19:17:32 - INFO - root - {"loss": 1.3115, "grad_norm": 1.2947421073913574, "learning_rate": 5.4440154440154445e-05, "epoch": 0.9443178117876913}
09/12/2025 19:18:18 - INFO - root - {"loss": 1.301, "grad_norm": 1.424735426902771, "learning_rate": 5.27614571092832e-05, "epoch": 0.9768804949527841}
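
Through the end of the first epoch the learning-rate column tracks the configured schedule (lr_scheduler_type='linear', learning_rate=0.0001, warmup_ratio=0.03): a ramp over roughly the first 3% of steps, then linear decay toward zero. A rough reconstruction, assuming the standard linear-with-warmup formula and ~6,142 total optimizer steps inferred from the epoch fractions (100 logged steps ~ epoch 0.0326); treat the exact step counts as an approximation:

    # Rough reconstruction of the logged LR schedule (linear with warmup).
    MAX_LR = 1e-4                      # learning_rate=0.0001
    TOTAL_STEPS = 6142                 # ~3071 steps/epoch x 2 epochs (inferred)
    WARMUP = int(0.03 * TOTAL_STEPS)   # warmup_ratio=0.03 -> ~184 steps

    def lr_at(step):
        if step < WARMUP:
            return MAX_LR * step / WARMUP
        return MAX_LR * max(0.0, (TOTAL_STEPS - step) / (TOTAL_STEPS - WARMUP))

    print(lr_at(100))  # ~5.4e-05, close to the logged 5.35e-05
    print(lr_at(200))  # ~9.97e-05, close to the logged 9.9765e-05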
09/12/2025 19:19:08 - INFO - root - {"loss": 1.1526, "grad_norm": 0.9475835561752319, "learning_rate": 5.108275977841196e-05, "epoch": 1.009443178117877}
09/12/2025 19:19:53 - INFO - root - {"loss": 1.0831, "grad_norm": 1.2209569215774536, "learning_rate": 4.940406244754071e-05, "epoch": 1.0420058612829697}
09/12/2025 19:20:40 - INFO - root - {"loss": 1.0363, "grad_norm": 0.9665380120277405, "learning_rate": 4.772536511666947e-05, "epoch": 1.0745685444480626}
09/12/2025 19:21:27 - INFO - root - {"loss": 1.0891, "grad_norm": 1.4726343154907227, "learning_rate": 4.604666778579822e-05, "epoch": 1.1071312276131553}
09/12/2025 19:22:13 - INFO - root - {"loss": 1.0595, "grad_norm": 1.068894386291504, "learning_rate": 4.436797045492698e-05, "epoch": 1.1396939107782482}
09/12/2025 19:22:59 - INFO - root - {"loss": 1.0899, "grad_norm": 1.3757398128509521, "learning_rate": 4.268927312405573e-05, "epoch": 1.172256593943341}
09/12/2025 19:23:46 - INFO - root - {"loss": 1.0093, "grad_norm": 1.140994668006897, "learning_rate": 4.101057579318449e-05, "epoch": 1.2048192771084336}
09/12/2025 19:24:32 - INFO - root - {"loss": 1.0377, "grad_norm": 1.304298996925354, "learning_rate": 3.933187846231325e-05, "epoch": 1.2373819602735265}
09/12/2025 19:25:19 - INFO - root - {"loss": 1.0275, "grad_norm": 1.4328362941741943, "learning_rate": 3.7653181131442e-05, "epoch": 1.2699446434386195}
09/12/2025 19:26:06 - INFO - root - {"loss": 1.0313, "grad_norm": 1.5918943881988525, "learning_rate": 3.597448380057076e-05, "epoch": 1.3025073266037122}
09/12/2025 19:26:54 - INFO - root - {"loss": 1.0425, "grad_norm": 0.985306978225708, "learning_rate": 3.4295786469699515e-05, "epoch": 1.3350700097688049}
09/12/2025 19:27:41 - INFO - root - {"loss": 1.0201, "grad_norm": 1.2810313701629639, "learning_rate": 3.261708913882827e-05, "epoch": 1.3676326929338978}
09/12/2025 19:28:28 - INFO - root - {"loss": 1.0241, "grad_norm": 1.0879994630813599, "learning_rate": 3.093839180795703e-05, "epoch": 1.4001953760989905}
09/12/2025 19:29:15 - INFO - root - {"loss": 1.0501, "grad_norm": 0.8427594900131226, "learning_rate": 2.9259694477085782e-05, "epoch": 1.4327580592640834}
09/12/2025 19:30:02 - INFO - root - {"loss": 1.0426, "grad_norm": 1.350049614906311, "learning_rate": 2.7580997146214537e-05, "epoch": 1.465320742429176}
09/12/2025 19:30:49 - INFO - root - {"loss": 0.983, "grad_norm": 0.8184306025505066, "learning_rate": 2.5902299815343295e-05, "epoch": 1.497883425594269}
09/12/2025 19:31:36 - INFO - root - {"loss": 1.0575, "grad_norm": 0.9275202751159668, "learning_rate": 2.4223602484472053e-05, "epoch": 1.530446108759362}
09/12/2025 19:32:23 - INFO - root - {"loss": 0.9992, "grad_norm": 1.7129088640213013, "learning_rate": 2.2544905153600808e-05, "epoch": 1.5630087919244544}
09/12/2025 19:33:09 - INFO - root - {"loss": 1.07, "grad_norm": 1.1975080966949463, "learning_rate": 2.0866207822729562e-05, "epoch": 1.5955714750895473}
09/12/2025 19:33:55 - INFO - root - {"loss": 1.0225, "grad_norm": 1.2896291017532349, "learning_rate": 1.9187510491858317e-05, "epoch": 1.6281341582546403}
09/12/2025 19:34:42 - INFO - root - {"loss": 1.0213, "grad_norm": 1.765040636062622, "learning_rate": 1.7508813160987075e-05, "epoch": 1.660696841419733}
09/12/2025 19:35:29 - INFO - root - {"loss": 0.9911, "grad_norm": 1.4939603805541992, "learning_rate": 1.583011583011583e-05, "epoch": 1.6932595245848256}
09/12/2025 19:36:15 - INFO - root - {"loss": 1.0129, "grad_norm": 1.3464261293411255, "learning_rate": 1.4151418499244588e-05, "epoch": 1.7258222077499186}
09/12/2025 19:37:03 - INFO - root - {"loss": 1.0114, "grad_norm": 1.7058932781219482, "learning_rate": 1.2472721168373342e-05, "epoch": 1.7583848909150115}
09/12/2025 19:37:50 - INFO - root - {"loss": 1.0325, "grad_norm": 1.2033623456954956, "learning_rate": 1.0794023837502099e-05, "epoch": 1.7909475740801042}
09/12/2025 19:38:36 - INFO - root - {"loss": 1.0124, "grad_norm": 1.327625036239624, "learning_rate": 9.115326506630855e-06, "epoch": 1.8235102572451969}
09/12/2025 19:39:24 - INFO - root - {"loss": 1.0436, "grad_norm": 0.9934705495834351, "learning_rate": 7.436629175759611e-06, "epoch": 1.8560729404102898}
09/12/2025 19:40:11 - INFO - root - {"loss": 1.01, "grad_norm": 1.3892148733139038, "learning_rate": 5.757931844888367e-06, "epoch": 1.8886356235753827}
09/12/2025 19:40:58 - INFO - root - {"loss": 1.0462, "grad_norm": 1.3630704879760742, "learning_rate": 4.079234514017123e-06, "epoch": 1.9211983067404754}
09/12/2025 19:41:45 - INFO - root - {"loss": 1.0356, "grad_norm": 1.227739691734314, "learning_rate": 2.400537183145879e-06, "epoch": 1.9537609899055681}
09/12/2025 19:42:33 - INFO - root - {"loss": 0.9835, "grad_norm": 1.502299189567566, "learning_rate": 7.218398522746349e-07, "epoch": 1.986323673070661}
09/12/2025 19:42:54 - INFO - root - {"train_runtime": 2883.2064, "train_samples_per_second": 17.037, "train_steps_per_second": 2.13, "total_flos": 5.301322660506747e+17, "train_loss": 1.1661418963239465, "epoch": 2.0}
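
The final summary is internally consistent: 2883.2 s x 2.13 steps/s ~ 6,141 optimizer steps, and with per_device_train_batch_size=8 that is ~49,130 samples, matching 17.037 samples/s x 2883.2 s ~ 49,120. Each record after the log prefix is plain JSON, so the loss and LR curves are easy to recover; a small sketch, where 'train.log' is a hypothetical filename for this transcript:

    # Parse the {"loss": ...} records out of a log like the one above.
    import json
    import re

    records = []
    with open("train.log") as f:       # hypothetical filename
        for line in f:
            m = re.search(r"\{.*\}", line)
            if m:
                records.append(json.loads(m.group(0)))

    steps = [r for r in records if "loss" in r]
    print(len(steps), "logged steps; final loss:",
          steps[-1]["loss"] if steps else None)

With logging_steps=100 in the args, each record corresponds to 100 optimizer steps, so the step axis for plotting is just 100 * (index + 1).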