2026-03-06 05:17:45 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='Qwen/Qwen1.5-MoE-A2.7B', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False)
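For reference, a minimal sketch of the model load these parameters imply (an assumption inferred from the logged fields, not the script's actual code):

    import torch
    from transformers import AutoModelForCausalLM

    # Sketch: each kwarg mirrors a field of the ModelConfig logged above.
    model = AutoModelForCausalLM.from_pretrained(
        "Qwen/Qwen1.5-MoE-A2.7B",
        revision="main",                          # model_revision='main'
        torch_dtype=torch.bfloat16,               # torch_dtype='bfloat16'
        attn_implementation="flash_attention_2",  # attn_implementation
        trust_remote_code=True,                   # trust_remote_code=True
    )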
2026-03-06 05:17:45 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='RoxanneWsyw/ESFT-summary', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False)
2026-03-06 05:17:45 - INFO - __main__ - Training parameters SFTConfig(
_n_gpu=1,
accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False},
adafactor=False,
adam_beta1=0.9,
adam_beta2=0.999,
adam_epsilon=1e-08,
attn_kl_weight=1.0,
auto_find_batch_size=False,
average_tokens_across_devices=False,
batch_eval_metrics=False,
benchmarks=[],
bf16=True,
bf16_full_eval=False,
callbacks=[],
chars_per_token=<CHARS_PER_TOKEN>,
chat_template=None,
cluster_mode=hierarchical-dynamic,
cluster_num_groups=None,
cluster_prune_ratio=None,
cluster_prune_tau=1.0,
data_seed=None,
dataloader_drop_last=False,
dataloader_num_workers=0,
dataloader_persistent_workers=False,
dataloader_pin_memory=True,
dataloader_prefetch_factor=None,
dataset_batch_size=None,
dataset_kwargs=None,
dataset_num_proc=None,
dataset_text_field=text,
ddp_backend=None,
ddp_broadcast_buffers=None,
ddp_bucket_cap_mb=None,
ddp_find_unused_parameters=None,
ddp_timeout=1800000000,
debug=[],
deepspeed=None,
disable_teacher_dropout=True,
disable_tqdm=False,
dispatch_batches=None,
do_eval=True,
do_predict=False,
do_train=False,
entropy_slope_alpha=1.0,
entropy_slope_beta=1.0,
eval_accumulation_steps=None,
eval_delay=0,
eval_do_concat_batches=True,
eval_on_start=False,
eval_packing=None,
eval_steps=None,
eval_strategy=IntervalStrategy.NO,
eval_use_gather_object=False,
evaluation_strategy=None,
fp16=False,
fp16_backend=auto,
fp16_full_eval=False,
fp16_opt_level=O1,
fsdp=[],
fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
fsdp_min_num_params=0,
fsdp_transformer_layer_cls_to_wrap=None,
full_determinism=False,
gradient_accumulation_steps=1,
gradient_checkpointing=True,
gradient_checkpointing_kwargs={'use_reentrant': False},
greater_is_better=None,
group_by_length=False,
half_precision_backend=auto,
hub_always_push=False,
hub_model_id=None,
hub_model_revision=main,
hub_private_repo=None,
hub_strategy=HubStrategy.EVERY_SAVE,
hub_token=<HUB_TOKEN>,
ignore_data_skip=False,
include_for_metrics=[],
include_inputs_for_metrics=False,
include_num_input_tokens_seen=False,
include_tokens_per_second=False,
jit_mode_eval=False,
label_names=None,
label_smoothing_factor=0.0,
last_entropy_weight=1.0,
layer_entropy_l1_layers=None,
layer_entropy_l1_weight=1.0,
learning_rate=1e-05,
length_column_name=length,
load_best_model_at_end=False,
local_rank=0,
log_level=info,
log_level_replica=warning,
log_on_each_node=True,
logging_dir=/project/flame/haozeh/llm-honing/sft_models/Qwen1.5-MOE-sft-ESFT-summary/runs/Mar06_05-17-43_orchard-flame-9,
logging_first_step=False,
logging_nan_inf_filter=True,
logging_steps=1,
logging_strategy=IntervalStrategy.STEPS,
lr_scheduler_kwargs={'min_lr_rate': 0.1},
lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR,
max_grad_norm=1.0,
max_length=4096,
max_seq_length=None,
max_steps=-1,
merging_metrics=None,
metric_for_best_model=None,
model_init_kwargs=None,
mp_parameters=,
neftune_noise_alpha=None,
no_cuda=False,
num_of_sequences=None,
num_train_epochs=1.0,
optim=OptimizerNames.ADAMW_TORCH,
optim_args=None,
optim_target_modules=None,
output_dir=/project/flame/haozeh/llm-honing/sft_models/Qwen1.5-MOE-sft-ESFT-summary,
overwrite_hub_revision=False,
overwrite_output_dir=True,
packing=False,
past_index=-1,
per_device_eval_batch_size=16,
per_device_train_batch_size=4,
prediction_loss_only=False,
push_to_hub=False,
push_to_hub_model_id=None,
push_to_hub_organization=None,
push_to_hub_revision=False,
push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
ray_scope=last,
remove_unused_columns=True,
report_to=['wandb'],
restore_callback_states_from_checkpoint=False,
resume_from_checkpoint=None,
router_manual_mask=None,
router_prune_enable=True,
router_prune_expert_per_layer=None,
router_prune_interval=5,
router_prune_min_keep=1,
router_prune_start_step=None,
router_prune_step_size=32,
router_prune_use_plan=True,
run_name=sft-base-ESFT-summary-epoch1,
save_on_each_node=False,
save_only_model=False,
save_safetensors=True,
save_steps=500,
save_strategy=SaveStrategy.NO,
save_total_limit=None,
seed=1234,
skip_memory_metrics=True,
split_batches=None,
system_prompt=None,
teacher_attn_implementation=None,
teacher_model_name_or_path=None,
teacher_model_revision=None,
teacher_torch_dtype=auto,
tf32=None,
torch_compile=False,
torch_compile_backend=None,
torch_compile_mode=None,
torch_empty_cache_steps=None,
torchdynamo=None,
tpu_metrics_debug=False,
tpu_num_cores=None,
use_cpu=False,
use_ipex=False,
use_legacy_prediction_loop=False,
use_liger=False,
use_liger_kernel=False,
use_mps_device=False,
wandb_entity=jayzxinkai-uc-san-diego,
wandb_project=moe-honing,
warmup_ratio=0.1,
warmup_steps=0,
weight_decay=0.0,
weight_feature_rank=None,
)
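For anyone reproducing the standard portion of this run, a minimal sketch of the corresponding TRL config follows. The cluster_*, router_prune_*, entropy/KL, and teacher_* fields above come from a project-specific SFTConfig subclass, so only stock TRL/transformers fields appear here; every value is copied from the dump.

    from trl import SFTConfig

    # Sketch, not the authors' exact script: stock fields from the config above.
    training_args = SFTConfig(
        output_dir="/project/flame/haozeh/llm-honing/sft_models/Qwen1.5-MOE-sft-ESFT-summary",
        run_name="sft-base-ESFT-summary-epoch1",
        num_train_epochs=1.0,
        per_device_train_batch_size=4,
        per_device_eval_batch_size=16,
        gradient_accumulation_steps=1,
        gradient_checkpointing=True,
        gradient_checkpointing_kwargs={"use_reentrant": False},
        learning_rate=1e-5,
        lr_scheduler_type="cosine_with_min_lr",
        lr_scheduler_kwargs={"min_lr_rate": 0.1},
        warmup_ratio=0.1,
        weight_decay=0.0,
        max_grad_norm=1.0,
        bf16=True,
        max_length=4096,
        packing=False,
        dataset_text_field="text",
        logging_steps=1,
        save_strategy="no",
        seed=1234,
        report_to=["wandb"],
    )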
2026-03-06 05:17:46 - INFO - datasets.builder - Found cached dataset esft-summary (/tmp/hf_cache/datasets/RoxanneWsyw___esft-summary/default/0.0.0/70d8f41993d8681cb5ccb26c656f4b9f5e0f8138)
2026-03-06 05:17:46 - INFO - datasets.arrow_dataset - Loading cached processed dataset at /tmp/hf_cache/datasets/RoxanneWsyw___esft-summary/default/0.0.0/70d8f41993d8681cb5ccb26c656f4b9f5e0f8138/cache-9ce0bfa6d79abec6_*_of_00001.arrow
2026-03-06 05:17:46 - INFO - datasets.arrow_dataset - Loading cached processed dataset at /tmp/hf_cache/datasets/RoxanneWsyw___esft-summary/default/0.0.0/70d8f41993d8681cb5ccb26c656f4b9f5e0f8138/cache-52ef29cf0a901bd3_*_of_00001.arrow
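The cache hits above correspond to the Hub dataset named in ScriptArguments; a sketch of the equivalent fresh load (assumed usage):

    from datasets import load_dataset

    # dataset_name / split names taken from the ScriptArguments logged above.
    ds = load_dataset("RoxanneWsyw/ESFT-summary")
    train_ds, test_ds = ds["train"], ds["test"]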
2026-03-06 05:17:46 - INFO - __main__ - *** Initializing model kwargs ***
2026-03-06 05:17:59 - INFO - datasets.arrow_dataset - Loading cached processed dataset at /tmp/hf_cache/datasets/RoxanneWsyw___esft-summary/default/0.0.0/70d8f41993d8681cb5ccb26c656f4b9f5e0f8138/cache-d38655f5e847720d_*_of_00001.arrow
2026-03-06 05:17:59 - INFO - datasets.arrow_dataset - Loading cached processed dataset at /tmp/hf_cache/datasets/RoxanneWsyw___esft-summary/default/0.0.0/70d8f41993d8681cb5ccb26c656f4b9f5e0f8138/cache-043de10e8f4dd89b_*_of_00001.arrow
2026-03-06 05:17:59 - INFO - datasets.arrow_dataset - Loading cached processed dataset at /tmp/hf_cache/datasets/RoxanneWsyw___esft-summary/default/0.0.0/70d8f41993d8681cb5ccb26c656f4b9f5e0f8138/cache-53041fc4e59a6ac3_*_of_00001.arrow
2026-03-06 05:17:59 - INFO - datasets.arrow_dataset - Loading cached processed dataset at /tmp/hf_cache/datasets/RoxanneWsyw___esft-summary/default/0.0.0/70d8f41993d8681cb5ccb26c656f4b9f5e0f8138/cache-aa449e12f6179bf3_*_of_00001.arrow
2026-03-06 05:18:05 - INFO - __main__ - *** Train ***
2026-03-06 05:18:05 - INFO - __main__ - Qwen2MoeForCausalLM(
  (model): Qwen2MoeModel(
    (embed_tokens): Embedding(151936, 2048)
    (layers): ModuleList(
      (0-23): 24 x Qwen2MoeDecoderLayer(
        (self_attn): Qwen2MoeFlashAttention2(
          (q_proj): Linear(in_features=2048, out_features=2048, bias=True)
          (k_proj): Linear(in_features=2048, out_features=2048, bias=True)
          (v_proj): Linear(in_features=2048, out_features=2048, bias=True)
          (o_proj): Linear(in_features=2048, out_features=2048, bias=False)
          (rotary_emb): Qwen2MoeRotaryEmbedding()
        )
        (mlp): Qwen2MoeSparseMoeBlock(
          (gate): Linear(in_features=2048, out_features=60, bias=False)
          (experts): ModuleList(
            (0-59): 60 x Qwen2MoeMLP(
              (gate_proj): Linear(in_features=2048, out_features=1408, bias=False)
              (up_proj): Linear(in_features=2048, out_features=1408, bias=False)
              (down_proj): Linear(in_features=1408, out_features=2048, bias=False)
              (act_fn): SiLU()
            )
          )
          (shared_expert): Qwen2MoeMLP(
            (gate_proj): Linear(in_features=2048, out_features=5632, bias=False)
            (up_proj): Linear(in_features=2048, out_features=5632, bias=False)
            (down_proj): Linear(in_features=5632, out_features=2048, bias=False)
            (act_fn): SiLU()
          )
          (shared_expert_gate): Linear(in_features=2048, out_features=1, bias=False)
        )
        (input_layernorm): Qwen2MoeRMSNorm((2048,), eps=1e-06)
        (post_attention_layernorm): Qwen2MoeRMSNorm((2048,), eps=1e-06)
      )
    )
    (norm): Qwen2MoeRMSNorm((2048,), eps=1e-06)
    (rotary_emb): Qwen2MoeRotaryEmbedding()
  )
  (lm_head): Linear(in_features=2048, out_features=151936, bias=False)
)
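As a sanity check, the total parameter count can be recomputed purely from the shapes printed in the dump; the arithmetic below is ours, but every dimension is read directly off the log:

    # Parameter count from the printed module shapes above.
    d, vocab, n_layers = 2048, 151936, 24
    n_experts, d_expert, d_shared = 60, 1408, 5632

    attn   = 4 * d * d + 3 * d                    # q/k/v/o; q, k, v carry biases
    expert = 3 * d * d_expert                     # gate/up/down projections
    moe    = n_experts * expert + d * n_experts   # 60 experts + router gate
    shared = 3 * d * d_shared + d                 # shared expert + its scalar gate
    norms  = 2 * d                                # two RMSNorm weights per layer
    layer  = attn + moe + shared + norms

    total = n_layers * layer + 2 * vocab * d + d  # + embed_tokens, lm_head, final norm
    print(f"{total / 1e9:.2f}B")                  # ~14.32B total parameters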
2026-03-06 06:01:41 - INFO - __main__ - *** Save model ***
2026-03-06 06:04:01 - INFO - __main__ - Model saved to /project/flame/haozeh/llm-honing/sft_models/Qwen1.5-MOE-sft-ESFT-summary
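The saved directory is a standard Hugging Face checkpoint; a sketch of reloading it for inference (assumed usage, not shown in the log; whether the tokenizer was saved alongside is an assumption):

    from transformers import AutoModelForCausalLM, AutoTokenizer

    path = "/project/flame/haozeh/llm-honing/sft_models/Qwen1.5-MOE-sft-ESFT-summary"
    model = AutoModelForCausalLM.from_pretrained(path, torch_dtype="bfloat16")
    tokenizer = AutoTokenizer.from_pretrained(path)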