2026-03-30 01:47:46 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='Qwen/Qwen1.5-MoE-A2.7B', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False)
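The ModelConfig above maps onto a standard transformers load call. A minimal sketch of how these fields are typically consumed (variable names are illustrative, not taken from the script):

import torch
from transformers import AutoModelForCausalLM

# Mirrors ModelConfig: bf16 weights, FlashAttention-2 attention, remote code allowed.
model = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen1.5-MoE-A2.7B",
    revision="main",
    torch_dtype=torch.bfloat16,
    attn_implementation="flash_attention_2",
    trust_remote_code=True,
)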
2026-03-30 01:47:46 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='HectorHe/math7k', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False)
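Likewise, the ScriptArguments reduce to a plain datasets load; a sketch assuming the standard datasets API:

from datasets import load_dataset

# dataset_name='HectorHe/math7k', with the 'train' split used for SFT.
train_dataset = load_dataset("HectorHe/math7k", split="train")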
2026-03-30 01:47:46 - INFO - __main__ - Training parameters SFTConfig(
_n_gpu=1,
accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False},
adafactor=False,
adam_beta1=0.9,
adam_beta2=0.999,
adam_epsilon=1e-08,
auto_find_batch_size=False,
average_tokens_across_devices=False,
batch_eval_metrics=False,
benchmarks=[],
bf16=True,
bf16_full_eval=False,
callbacks=[],
chars_per_token=<CHARS_PER_TOKEN>,
chat_template=None,
data_seed=None,
dataloader_drop_last=False,
dataloader_num_workers=0,
dataloader_persistent_workers=False,
dataloader_pin_memory=True,
dataloader_prefetch_factor=None,
dataset_batch_size=None,
dataset_kwargs=None,
dataset_num_proc=None,
dataset_text_field=text,
ddp_backend=None,
ddp_broadcast_buffers=None,
ddp_bucket_cap_mb=None,
ddp_find_unused_parameters=None,
ddp_timeout=1800000000,
debug=[],
deepspeed=None,
disable_tqdm=False,
do_eval=True,
do_predict=False,
do_train=False,
eval_accumulation_steps=None,
eval_delay=0,
eval_do_concat_batches=True,
eval_on_start=False,
eval_packing=None,
eval_steps=None,
eval_strategy=IntervalStrategy.NO,
eval_use_gather_object=False,
fp16=False,
fp16_backend=auto,
fp16_full_eval=False,
fp16_opt_level=O1,
fsdp=[],
fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
fsdp_min_num_params=0,
fsdp_transformer_layer_cls_to_wrap=None,
full_determinism=False,
gradient_accumulation_steps=1,
gradient_checkpointing=True,
gradient_checkpointing_kwargs={'use_reentrant': False},
greater_is_better=None,
group_by_length=False,
half_precision_backend=auto,
hub_always_push=False,
hub_model_id=Qwen1.5-MOE-sft-math7k-sft-2epochs-frozen-router,
hub_model_revision=main,
hub_private_repo=None,
hub_strategy=HubStrategy.EVERY_SAVE,
hub_token=<HUB_TOKEN>,
ignore_data_skip=False,
include_for_metrics=[],
include_inputs_for_metrics=False,
include_num_input_tokens_seen=False,
include_tokens_per_second=False,
jit_mode_eval=False,
label_names=None,
label_smoothing_factor=0.0,
learning_rate=1e-05,
length_column_name=length,
load_best_model_at_end=False,
local_rank=0,
log_level=info,
log_level_replica=warning,
log_on_each_node=True,
logging_dir=/tmp/data/Qwen1.5-MOE/sft/math7k/sft_2epochs-frozen-router/runs/Mar30_01-47-46_orchard-flame-16,
logging_first_step=False,
logging_nan_inf_filter=True,
logging_steps=1,
logging_strategy=IntervalStrategy.STEPS,
lr_scheduler_kwargs={'min_lr_rate': 0.1},
lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR,
max_grad_norm=1.0,
max_length=4096,
max_seq_length=None,
max_steps=-1,
metric_for_best_model=None,
model_init_kwargs=None,
mp_parameters=,
neftune_noise_alpha=None,
no_cuda=False,
num_of_sequences=None,
num_train_epochs=2,
optim=OptimizerNames.ADAMW_TORCH,
optim_args=None,
optim_target_modules=None,
output_dir=/tmp/data/Qwen1.5-MOE/sft/math7k/sft_2epochs-frozen-router,
overwrite_hub_revision=False,
overwrite_output_dir=True,
packing=False,
past_index=-1,
per_device_eval_batch_size=16,
per_device_train_batch_size=4,
prediction_loss_only=False,
push_to_hub=True,
push_to_hub_model_id=None,
push_to_hub_organization=None,
push_to_hub_revision=False,
push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
ray_scope=last,
remove_unused_columns=True,
report_to=['wandb'],
restore_callback_states_from_checkpoint=False,
resume_from_checkpoint=None,
run_name=/tmp/data/Qwen1.5-MOE/sft/math7k/sft_2epochs-frozen-router,
save_on_each_node=False,
save_only_model=False,
save_safetensors=True,
save_steps=500,
save_strategy=SaveStrategy.EPOCH,
save_total_limit=1,
seed=1234,
skip_memory_metrics=True,
system_prompt=None,
tf32=None,
torch_compile=False,
torch_compile_backend=None,
torch_compile_mode=None,
torch_empty_cache_steps=None,
torchdynamo=None,
tp_size=0,
tpu_metrics_debug=False,
tpu_num_cores=None,
use_cpu=False,
use_ipex=False,
use_legacy_prediction_loop=False,
use_liger=False,
use_liger_kernel=False,
use_mps_device=False,
wandb_entity=None,
wandb_project=None,
warmup_ratio=0.1,
warmup_steps=0,
weight_decay=0.0,
)
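For orientation, the non-default fields in this dump correspond roughly to the following SFTConfig; this is a reconstruction from the logged values, not the script's actual code:

from trl import SFTConfig

training_args = SFTConfig(
    output_dir="/tmp/data/Qwen1.5-MOE/sft/math7k/sft_2epochs-frozen-router",
    num_train_epochs=2,
    per_device_train_batch_size=4,
    learning_rate=1e-5,
    lr_scheduler_type="cosine_with_min_lr",
    lr_scheduler_kwargs={"min_lr_rate": 0.1},  # cosine decays to 10% of peak LR
    warmup_ratio=0.1,
    max_length=4096,
    packing=False,
    bf16=True,
    gradient_checkpointing=True,
    gradient_checkpointing_kwargs={"use_reentrant": False},
    logging_steps=1,
    save_strategy="epoch",
    save_total_limit=1,
    seed=1234,
    push_to_hub=True,
    hub_model_id="Qwen1.5-MOE-sft-math7k-sft-2epochs-frozen-router",
    report_to=["wandb"],
)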
2026-03-30 01:47:48 - INFO - __main__ - *** Initializing model kwargs ***
2026-03-30 01:48:06 - INFO - __main__ - *** Freezing MoE router/gate parameters ***
2026-03-30 01:48:06 - INFO - __main__ - Froze 24 router weight tensors (2,949,120 parameters total)
2026-03-30 01:48:06 - INFO - __main__ - Frozen router modules (24):
2026-03-30 01:48:06 - INFO - __main__ - - model.layers.0.mlp.gate
2026-03-30 01:48:06 - INFO - __main__ - - model.layers.1.mlp.gate
2026-03-30 01:48:06 - INFO - __main__ - - model.layers.2.mlp.gate
2026-03-30 01:48:06 - INFO - __main__ - - model.layers.3.mlp.gate
2026-03-30 01:48:06 - INFO - __main__ - - model.layers.4.mlp.gate
2026-03-30 01:48:06 - INFO - __main__ - - model.layers.5.mlp.gate
2026-03-30 01:48:06 - INFO - __main__ - - model.layers.6.mlp.gate
2026-03-30 01:48:06 - INFO - __main__ - - model.layers.7.mlp.gate
2026-03-30 01:48:06 - INFO - __main__ - - model.layers.8.mlp.gate
2026-03-30 01:48:06 - INFO - __main__ - - model.layers.9.mlp.gate
2026-03-30 01:48:06 - INFO - __main__ - - model.layers.10.mlp.gate
2026-03-30 01:48:06 - INFO - __main__ - - model.layers.11.mlp.gate
2026-03-30 01:48:06 - INFO - __main__ - - model.layers.12.mlp.gate
2026-03-30 01:48:06 - INFO - __main__ - - model.layers.13.mlp.gate
2026-03-30 01:48:06 - INFO - __main__ - - model.layers.14.mlp.gate
2026-03-30 01:48:06 - INFO - __main__ - - model.layers.15.mlp.gate
2026-03-30 01:48:06 - INFO - __main__ - - model.layers.16.mlp.gate
2026-03-30 01:48:06 - INFO - __main__ - - model.layers.17.mlp.gate
2026-03-30 01:48:06 - INFO - __main__ - - model.layers.18.mlp.gate
2026-03-30 01:48:06 - INFO - __main__ - - model.layers.19.mlp.gate
2026-03-30 01:48:06 - INFO - __main__ - - model.layers.20.mlp.gate
2026-03-30 01:48:06 - INFO - __main__ - - model.layers.21.mlp.gate
2026-03-30 01:48:06 - INFO - __main__ - - model.layers.22.mlp.gate
2026-03-30 01:48:06 - INFO - __main__ - - model.layers.23.mlp.gate
2026-03-30 01:48:06 - INFO - __main__ - Total params: 14,315,784,192 | Trainable: 14,312,835,072 (99.98%) | Frozen router params: 2,949,120
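The freezing step touches only the per-layer routing linears (mlp.gate), leaving the experts, the shared expert, and shared_expert_gate trainable. A sketch of the kind of loop that produces these numbers (a hypothetical helper, not necessarily the script's implementation):

def freeze_moe_routers(model):
    # Freeze Qwen2-MoE router linears in place; return (tensors, params) frozen.
    frozen_tensors, frozen_params = 0, 0
    for name, param in model.named_parameters():
        # Matches model.layers.<i>.mlp.gate.weight, but not the experts'
        # gate_proj weights or the per-layer shared_expert_gate.
        if ".mlp.gate." in name:
            param.requires_grad = False
            frozen_tensors += 1
            frozen_params += param.numel()
    return frozen_tensors, frozen_params

# Each gate is Linear(2048 -> 60, bias=False), so
# 24 layers * 2048 * 60 = 2,949,120 frozen parameters, matching the log.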
2026-03-30 01:48:19 - INFO - __main__ - *** Train ***
2026-03-30 01:48:19 - INFO - __main__ - Qwen2MoeForCausalLM(
  (model): Qwen2MoeModel(
    (embed_tokens): Embedding(151936, 2048)
    (layers): ModuleList(
      (0-23): 24 x Qwen2MoeDecoderLayer(
        (self_attn): Qwen2MoeFlashAttention2(
          (q_proj): Linear(in_features=2048, out_features=2048, bias=True)
          (k_proj): Linear(in_features=2048, out_features=2048, bias=True)
          (v_proj): Linear(in_features=2048, out_features=2048, bias=True)
          (o_proj): Linear(in_features=2048, out_features=2048, bias=False)
          (rotary_emb): Qwen2MoeRotaryEmbedding()
        )
        (mlp): Qwen2MoeSparseMoeBlock(
          (gate): Linear(in_features=2048, out_features=60, bias=False)
          (experts): ModuleList(
            (0-59): 60 x Qwen2MoeMLP(
              (gate_proj): Linear(in_features=2048, out_features=1408, bias=False)
              (up_proj): Linear(in_features=2048, out_features=1408, bias=False)
              (down_proj): Linear(in_features=1408, out_features=2048, bias=False)
              (act_fn): SiLU()
            )
          )
          (shared_expert): Qwen2MoeMLP(
            (gate_proj): Linear(in_features=2048, out_features=5632, bias=False)
            (up_proj): Linear(in_features=2048, out_features=5632, bias=False)
            (down_proj): Linear(in_features=5632, out_features=2048, bias=False)
            (act_fn): SiLU()
          )
          (shared_expert_gate): Linear(in_features=2048, out_features=1, bias=False)
        )
        (input_layernorm): Qwen2MoeRMSNorm((2048,), eps=1e-06)
        (post_attention_layernorm): Qwen2MoeRMSNorm((2048,), eps=1e-06)
      )
    )
    (norm): Qwen2MoeRMSNorm((2048,), eps=1e-06)
    (rotary_emb): Qwen2MoeRotaryEmbedding()
  )
  (lm_head): Linear(in_features=2048, out_features=151936, bias=False)
)
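The printed shapes are enough to reproduce the totals reported above; a worked arithmetic check, derived purely from the module dump:

embed   = 151_936 * 2_048                 # embed_tokens
lm_head = 151_936 * 2_048                 # untied output head
attn    = 4 * 2_048 * 2_048 + 3 * 2_048   # q/k/v/o_proj weights + q/k/v biases
router  = 2_048 * 60                      # mlp.gate (the frozen part)
experts = 60 * 3 * 2_048 * 1_408          # 60 experts x gate/up/down_proj
shared  = 3 * 2_048 * 5_632 + 2_048       # shared_expert + shared_expert_gate
norms   = 2 * 2_048                       # the two per-layer RMSNorms
per_layer = attn + router + experts + shared + norms
total = 24 * per_layer + embed + lm_head + 2_048   # + final norm
assert total == 14_315_784_192        # "Total params" in the log
assert 24 * router == 2_949_120       # "Frozen router params" in the log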
2026-03-30 02:27:20 - INFO - __main__ - *** Save model ***
2026-03-30 02:29:17 - INFO - __main__ - Model saved to /tmp/data/Qwen1.5-MOE/sft/math7k/sft_2epochs-frozen-router
2026-03-30 02:29:17 - INFO - __main__ - Pushing to hub...
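The train/save/push sequence corresponds to the usual TRL calls; a sketch with hypothetical wiring (the script's variable names may differ):

from trl import SFTTrainer

trainer = SFTTrainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
)
trainer.train()
trainer.save_model(training_args.output_dir)  # writes safetensors shards + config
if training_args.push_to_hub:
    trainer.push_to_hub()                     # uploads to hub_model_id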