初始化项目,由ModelHub XC社区提供模型
Model: pmahdavi/Llama-3.1-8B-coding Source: Original Platform
This commit is contained in:
36
.gitattributes
vendored
Normal file
36
.gitattributes
vendored
Normal file
@@ -0,0 +1,36 @@
|
||||
*.7z filter=lfs diff=lfs merge=lfs -text
|
||||
*.arrow filter=lfs diff=lfs merge=lfs -text
|
||||
*.bin filter=lfs diff=lfs merge=lfs -text
|
||||
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
||||
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
||||
*.ftz filter=lfs diff=lfs merge=lfs -text
|
||||
*.gz filter=lfs diff=lfs merge=lfs -text
|
||||
*.h5 filter=lfs diff=lfs merge=lfs -text
|
||||
*.joblib filter=lfs diff=lfs merge=lfs -text
|
||||
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
||||
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
||||
*.model filter=lfs diff=lfs merge=lfs -text
|
||||
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
||||
*.npy filter=lfs diff=lfs merge=lfs -text
|
||||
*.npz filter=lfs diff=lfs merge=lfs -text
|
||||
*.onnx filter=lfs diff=lfs merge=lfs -text
|
||||
*.ot filter=lfs diff=lfs merge=lfs -text
|
||||
*.parquet filter=lfs diff=lfs merge=lfs -text
|
||||
*.pb filter=lfs diff=lfs merge=lfs -text
|
||||
*.pickle filter=lfs diff=lfs merge=lfs -text
|
||||
*.pkl filter=lfs diff=lfs merge=lfs -text
|
||||
*.pt filter=lfs diff=lfs merge=lfs -text
|
||||
*.pth filter=lfs diff=lfs merge=lfs -text
|
||||
*.rar filter=lfs diff=lfs merge=lfs -text
|
||||
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
||||
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
||||
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
||||
*.tar filter=lfs diff=lfs merge=lfs -text
|
||||
*.tflite filter=lfs diff=lfs merge=lfs -text
|
||||
*.tgz filter=lfs diff=lfs merge=lfs -text
|
||||
*.wasm filter=lfs diff=lfs merge=lfs -text
|
||||
*.xz filter=lfs diff=lfs merge=lfs -text
|
||||
*.zip filter=lfs diff=lfs merge=lfs -text
|
||||
*.zst filter=lfs diff=lfs merge=lfs -text
|
||||
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
||||
tokenizer.json filter=lfs diff=lfs merge=lfs -text
|
||||
63
README.md
Normal file
63
README.md
Normal file
@@ -0,0 +1,63 @@
|
||||
---
|
||||
library_name: transformers
|
||||
license: other
|
||||
base_model: meta-llama/Llama-3.1-8B
|
||||
tags:
|
||||
- llama-factory
|
||||
- full
|
||||
- generated_from_trainer
|
||||
datasets:
|
||||
- tulu3_category_loader
|
||||
model-index:
|
||||
- name: Llama-3.1-8B_tulu3_mixture_coding_full_ebs128_lr5e-06
|
||||
results: []
|
||||
---
|
||||
|
||||
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
|
||||
should probably proofread and complete it, then remove this comment. -->
|
||||
|
||||
# Llama-3.1-8B_tulu3_mixture_coding_full_ebs128_lr5e-06
|
||||
|
||||
This model is a fine-tuned version of [meta-llama/Llama-3.1-8B](https://huggingface.co/meta-llama/Llama-3.1-8B) on the tulu3_mixture_coding dataset. This checkpoint was released alongside https://arxiv.org/abs/2509.11167.
|
||||
|
||||
## Model description
|
||||
|
||||
More information needed
|
||||
|
||||
## Intended uses & limitations
|
||||
|
||||
More information needed
|
||||
|
||||
## Training and evaluation data
|
||||
|
||||
More information needed
|
||||
|
||||
## Training procedure
|
||||
|
||||
### Training hyperparameters
|
||||
|
||||
The following hyperparameters were used during training:
|
||||
- learning_rate: 5e-06
|
||||
- train_batch_size: 2
|
||||
- eval_batch_size: 8
|
||||
- seed: 42
|
||||
- distributed_type: multi-GPU
|
||||
- num_devices: 2
|
||||
- gradient_accumulation_steps: 32
|
||||
- total_train_batch_size: 128
|
||||
- total_eval_batch_size: 16
|
||||
- optimizer: Use adamw_torch with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
|
||||
- lr_scheduler_type: cosine
|
||||
- lr_scheduler_warmup_ratio: 0.03
|
||||
- num_epochs: 1.0
|
||||
|
||||
### Training results
|
||||
|
||||
|
||||
|
||||
### Framework versions
|
||||
|
||||
- Transformers 4.51.1
|
||||
- Pytorch 2.6.0+cu124
|
||||
- Datasets 3.4.1
|
||||
- Tokenizers 0.21.0
|
||||
8
all_results.json
Normal file
8
all_results.json
Normal file
@@ -0,0 +1,8 @@
|
||||
{
|
||||
"epoch": 0.9995220557226798,
|
||||
"total_flos": 145966987345920.0,
|
||||
"train_loss": 0.845390060482317,
|
||||
"train_runtime": 63839.4451,
|
||||
"train_samples_per_second": 2.229,
|
||||
"train_steps_per_second": 0.017
|
||||
}
|
||||
35
config.json
Normal file
35
config.json
Normal file
@@ -0,0 +1,35 @@
|
||||
{
|
||||
"architectures": [
|
||||
"LlamaForCausalLM"
|
||||
],
|
||||
"attention_bias": false,
|
||||
"attention_dropout": 0.0,
|
||||
"bos_token_id": 128000,
|
||||
"eos_token_id": 128001,
|
||||
"head_dim": 128,
|
||||
"hidden_act": "silu",
|
||||
"hidden_size": 4096,
|
||||
"initializer_range": 0.02,
|
||||
"intermediate_size": 14336,
|
||||
"max_position_embeddings": 131072,
|
||||
"mlp_bias": false,
|
||||
"model_type": "llama",
|
||||
"num_attention_heads": 32,
|
||||
"num_hidden_layers": 32,
|
||||
"num_key_value_heads": 8,
|
||||
"pretraining_tp": 1,
|
||||
"rms_norm_eps": 1e-05,
|
||||
"rope_scaling": {
|
||||
"factor": 8.0,
|
||||
"high_freq_factor": 4.0,
|
||||
"low_freq_factor": 1.0,
|
||||
"original_max_position_embeddings": 8192,
|
||||
"rope_type": "llama3"
|
||||
},
|
||||
"rope_theta": 500000.0,
|
||||
"tie_word_embeddings": false,
|
||||
"torch_dtype": "bfloat16",
|
||||
"transformers_version": "4.51.1",
|
||||
"use_cache": false,
|
||||
"vocab_size": 128256
|
||||
}
|
||||
3
export/exp_avg_sq.safetensors
Normal file
3
export/exp_avg_sq.safetensors
Normal file
@@ -0,0 +1,3 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:fb0af75bc627783ba0187e3f8b2264e4ba60696e0ded25993c9a3bec8bd7c88c
|
||||
size 32121079000
|
||||
3
export/fisher_diag_s1024_bs128.safetensors
Normal file
3
export/fisher_diag_s1024_bs128.safetensors
Normal file
@@ -0,0 +1,3 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:79ad01a1a1cebb118a5b35a93ffb1692681771b4973b8fcbc4522675ec85145b
|
||||
size 32121079000
|
||||
9
generation_config.json
Normal file
9
generation_config.json
Normal file
@@ -0,0 +1,9 @@
|
||||
{
|
||||
"_from_model_config": true,
|
||||
"bos_token_id": 128000,
|
||||
"do_sample": true,
|
||||
"eos_token_id": 128001,
|
||||
"temperature": 0.6,
|
||||
"top_p": 0.9,
|
||||
"transformers_version": "4.51.1"
|
||||
}
|
||||
3
model-00001-of-00004.safetensors
Normal file
3
model-00001-of-00004.safetensors
Normal file
@@ -0,0 +1,3 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:01aabe91e0e8bd9d90749107831611e150ac3582fd141e612323de374e9bb9d8
|
||||
size 4976698672
|
||||
3
model-00002-of-00004.safetensors
Normal file
3
model-00002-of-00004.safetensors
Normal file
@@ -0,0 +1,3 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:b2e147cf033413e63247a438c0b23632c7bafaad032c717431b7b77c1b24972a
|
||||
size 4999802720
|
||||
3
model-00003-of-00004.safetensors
Normal file
3
model-00003-of-00004.safetensors
Normal file
@@ -0,0 +1,3 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:fc4f96a5d98f58401c550db8dc8495f6f9606541520d57070c7be3e650b42d82
|
||||
size 4915916176
|
||||
3
model-00004-of-00004.safetensors
Normal file
3
model-00004-of-00004.safetensors
Normal file
@@ -0,0 +1,3 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:f7bd0a4746e3a0712c57bdfdf184d65fe3542e4e1a8600a2285269f632df28d4
|
||||
size 1168138808
|
||||
298
model.safetensors.index.json
Normal file
298
model.safetensors.index.json
Normal file
@@ -0,0 +1,298 @@
|
||||
{
|
||||
"metadata": {
|
||||
"total_size": 16060522496
|
||||
},
|
||||
"weight_map": {
|
||||
"lm_head.weight": "model-00004-of-00004.safetensors",
|
||||
"model.embed_tokens.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.17.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.17.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.17.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.18.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.18.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.18.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.18.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.18.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.18.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.18.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.18.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.18.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.19.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.19.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.19.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.19.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.19.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.19.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.19.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.19.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.19.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.20.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.20.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.20.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.20.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.20.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.21.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.21.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.28.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.28.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.28.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.28.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.28.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.28.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.28.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.28.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.28.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.29.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.29.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.29.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.29.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.29.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.29.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.29.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.29.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.29.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.30.input_layernorm.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.30.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.30.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.30.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.30.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.30.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.30.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.30.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.30.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.31.input_layernorm.weight": "model-00004-of-00004.safetensors",
|
||||
"model.layers.31.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
|
||||
"model.layers.31.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.31.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.31.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
|
||||
"model.layers.31.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.31.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.31.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.31.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
|
||||
"model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.7.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.7.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.8.input_layernorm.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.8.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.8.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.8.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.8.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.8.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
|
||||
"model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.9.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.9.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.layers.9.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
|
||||
"model.norm.weight": "model-00004-of-00004.safetensors"
|
||||
}
|
||||
}
|
||||
3
model_states/zero_pp_rank_0_mp_rank_00_model_states.pt
Normal file
3
model_states/zero_pp_rank_0_mp_rank_00_model_states.pt
Normal file
@@ -0,0 +1,3 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:ce1cbccd213c73f5f14cd8ef82d784b903a600bb3fcefec401a7b6839aae1be2
|
||||
size 149093
|
||||
3
model_states/zero_pp_rank_1_mp_rank_00_model_states.pt
Normal file
3
model_states/zero_pp_rank_1_mp_rank_00_model_states.pt
Normal file
@@ -0,0 +1,3 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:5f77c8df7fbe5a5055801516cc593d0504d6a0928671f8cfb7ae449d08be203b
|
||||
size 149093
|
||||
3
rng_state_0.pth
Normal file
3
rng_state_0.pth
Normal file
@@ -0,0 +1,3 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:c8d6a959372d5e0c2ea025dd26c9d0ad2046fce19352056cae8074dcbd0a6fd4
|
||||
size 14512
|
||||
3
rng_state_1.pth
Normal file
3
rng_state_1.pth
Normal file
@@ -0,0 +1,3 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:0f68a37892a1b445d21bb35cc10bf7a058a6f9ec8c363f5ed156ff4f49d90fb6
|
||||
size 14512
|
||||
3
scheduler.pt
Normal file
3
scheduler.pt
Normal file
@@ -0,0 +1,3 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:36dd5f953aa6491cbd5bcc3e1bb0bb52769f592a7ef677e16c6afe767515245d
|
||||
size 1064
|
||||
17
special_tokens_map.json
Normal file
17
special_tokens_map.json
Normal file
@@ -0,0 +1,17 @@
|
||||
{
|
||||
"bos_token": {
|
||||
"content": "<|begin_of_text|>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false
|
||||
},
|
||||
"eos_token": {
|
||||
"content": "<|end_of_text|>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false
|
||||
},
|
||||
"pad_token": "<|end_of_text|>"
|
||||
}
|
||||
3
tokenizer.json
Normal file
3
tokenizer.json
Normal file
@@ -0,0 +1,3 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:6b9e4e7fb171f92fd137b777cc2714bf87d11576700a1dcd7a399e7bbe39537b
|
||||
size 17209920
|
||||
2066
tokenizer_config.json
Normal file
2066
tokenizer_config.json
Normal file
File diff suppressed because it is too large
Load Diff
8
train_results.json
Normal file
8
train_results.json
Normal file
@@ -0,0 +1,8 @@
|
||||
{
|
||||
"epoch": 0.9995220557226798,
|
||||
"total_flos": 145966987345920.0,
|
||||
"train_loss": 0.845390060482317,
|
||||
"train_runtime": 63839.4451,
|
||||
"train_samples_per_second": 2.229,
|
||||
"train_steps_per_second": 0.017
|
||||
}
|
||||
112
trainer_log.jsonl
Normal file
112
trainer_log.jsonl
Normal file
@@ -0,0 +1,112 @@
|
||||
{"current_steps": 10, "total_steps": 1111, "loss": 1.1112, "lr": 1.323529411764706e-06, "epoch": 0.00899659816132025, "percentage": 0.9, "elapsed_time": "0:09:34", "remaining_time": "17:35:06"}
|
||||
{"current_steps": 20, "total_steps": 1111, "loss": 0.9362, "lr": 2.7941176470588237e-06, "epoch": 0.0179931963226405, "percentage": 1.8, "elapsed_time": "0:19:07", "remaining_time": "17:23:32"}
|
||||
{"current_steps": 30, "total_steps": 1111, "loss": 0.8806, "lr": 4.264705882352942e-06, "epoch": 0.02698979448396075, "percentage": 2.7, "elapsed_time": "0:28:40", "remaining_time": "17:13:04"}
|
||||
{"current_steps": 40, "total_steps": 1111, "loss": 0.8947, "lr": 4.9997341046993195e-06, "epoch": 0.035986392645281, "percentage": 3.6, "elapsed_time": "0:38:10", "remaining_time": "17:02:02"}
|
||||
{"current_steps": 50, "total_steps": 1111, "loss": 0.8764, "lr": 4.997607281643338e-06, "epoch": 0.04498299080660125, "percentage": 4.5, "elapsed_time": "0:47:51", "remaining_time": "16:55:29"}
|
||||
{"current_steps": 60, "total_steps": 1111, "loss": 0.8758, "lr": 4.993355445074358e-06, "epoch": 0.0539795889679215, "percentage": 5.4, "elapsed_time": "0:57:26", "remaining_time": "16:46:14"}
|
||||
{"current_steps": 70, "total_steps": 1111, "loss": 0.8839, "lr": 4.986982212538754e-06, "epoch": 0.06297618712924176, "percentage": 6.3, "elapsed_time": "1:07:01", "remaining_time": "16:36:41"}
|
||||
{"current_steps": 80, "total_steps": 1111, "loss": 0.8741, "lr": 4.978493006508408e-06, "epoch": 0.071972785290562, "percentage": 7.2, "elapsed_time": "1:16:24", "remaining_time": "16:24:40"}
|
||||
{"current_steps": 90, "total_steps": 1111, "loss": 0.8859, "lr": 4.967895049767168e-06, "epoch": 0.08096938345188226, "percentage": 8.1, "elapsed_time": "1:26:02", "remaining_time": "16:16:00"}
|
||||
{"current_steps": 100, "total_steps": 1111, "loss": 0.8506, "lr": 4.9551973592655565e-06, "epoch": 0.0899659816132025, "percentage": 9.0, "elapsed_time": "1:35:30", "remaining_time": "16:05:31"}
|
||||
{"current_steps": 110, "total_steps": 1111, "loss": 0.9021, "lr": 4.940410738448974e-06, "epoch": 0.09896257977452276, "percentage": 9.9, "elapsed_time": "1:44:58", "remaining_time": "15:55:17"}
|
||||
{"current_steps": 120, "total_steps": 1111, "loss": 0.8752, "lr": 4.923547768065916e-06, "epoch": 0.107959177935843, "percentage": 10.8, "elapsed_time": "1:54:30", "remaining_time": "15:45:38"}
|
||||
{"current_steps": 130, "total_steps": 1111, "loss": 0.8824, "lr": 4.904622795464018e-06, "epoch": 0.11695577609716326, "percentage": 11.7, "elapsed_time": "2:03:56", "remaining_time": "15:35:18"}
|
||||
{"current_steps": 140, "total_steps": 1111, "loss": 0.8519, "lr": 4.883651922383059e-06, "epoch": 0.12595237425848352, "percentage": 12.6, "elapsed_time": "2:13:35", "remaining_time": "15:26:33"}
|
||||
{"current_steps": 150, "total_steps": 1111, "loss": 0.8565, "lr": 4.860652991255274e-06, "epoch": 0.13494897241980378, "percentage": 13.5, "elapsed_time": "2:23:06", "remaining_time": "15:16:51"}
|
||||
{"current_steps": 160, "total_steps": 1111, "loss": 0.8884, "lr": 4.835645570024666e-06, "epoch": 0.143945570581124, "percentage": 14.4, "elapsed_time": "2:32:26", "remaining_time": "15:06:07"}
|
||||
{"current_steps": 170, "total_steps": 1111, "loss": 0.8394, "lr": 4.808650935498216e-06, "epoch": 0.15294216874244426, "percentage": 15.3, "elapsed_time": "2:41:57", "remaining_time": "14:56:27"}
|
||||
{"current_steps": 180, "total_steps": 1111, "loss": 0.8542, "lr": 4.779692055243149e-06, "epoch": 0.16193876690376452, "percentage": 16.2, "elapsed_time": "2:51:30", "remaining_time": "14:47:02"}
|
||||
{"current_steps": 190, "total_steps": 1111, "loss": 0.8633, "lr": 4.748793568045682e-06, "epoch": 0.17093536506508478, "percentage": 17.1, "elapsed_time": "3:00:52", "remaining_time": "14:36:45"}
|
||||
{"current_steps": 200, "total_steps": 1111, "loss": 0.8532, "lr": 4.715981762947854e-06, "epoch": 0.179931963226405, "percentage": 18.0, "elapsed_time": "3:10:27", "remaining_time": "14:27:31"}
|
||||
{"current_steps": 210, "total_steps": 1111, "loss": 0.8544, "lr": 4.681284556880294e-06, "epoch": 0.18892856138772526, "percentage": 18.9, "elapsed_time": "3:19:50", "remaining_time": "14:17:24"}
|
||||
{"current_steps": 220, "total_steps": 1111, "loss": 0.8463, "lr": 4.6447314709099436e-06, "epoch": 0.19792515954904552, "percentage": 19.8, "elapsed_time": "3:29:20", "remaining_time": "14:07:48"}
|
||||
{"current_steps": 230, "total_steps": 1111, "loss": 0.8652, "lr": 4.606353605122954e-06, "epoch": 0.20692175771036578, "percentage": 20.7, "elapsed_time": "3:38:52", "remaining_time": "13:58:21"}
|
||||
{"current_steps": 240, "total_steps": 1111, "loss": 0.8737, "lr": 4.566183612164116e-06, "epoch": 0.215918355871686, "percentage": 21.6, "elapsed_time": "3:48:22", "remaining_time": "13:48:47"}
|
||||
{"current_steps": 250, "total_steps": 1111, "loss": 0.8448, "lr": 4.52425566945535e-06, "epoch": 0.22491495403300626, "percentage": 22.5, "elapsed_time": "3:57:54", "remaining_time": "13:39:20"}
|
||||
{"current_steps": 260, "total_steps": 1111, "loss": 0.8531, "lr": 4.480605450116879e-06, "epoch": 0.23391155219432652, "percentage": 23.4, "elapsed_time": "4:07:39", "remaining_time": "13:30:35"}
|
||||
{"current_steps": 270, "total_steps": 1111, "loss": 0.8569, "lr": 4.435270092615835e-06, "epoch": 0.24290815035564678, "percentage": 24.3, "elapsed_time": "4:17:12", "remaining_time": "13:21:09"}
|
||||
{"current_steps": 280, "total_steps": 1111, "loss": 0.8559, "lr": 4.388288169168121e-06, "epoch": 0.25190474851696704, "percentage": 25.2, "elapsed_time": "4:26:43", "remaining_time": "13:11:34"}
|
||||
{"current_steps": 290, "total_steps": 1111, "loss": 0.8487, "lr": 4.339699652920407e-06, "epoch": 0.2609013466782873, "percentage": 26.1, "elapsed_time": "4:36:12", "remaining_time": "13:01:56"}
|
||||
{"current_steps": 300, "total_steps": 1111, "loss": 0.8427, "lr": 4.28954588394019e-06, "epoch": 0.26989794483960755, "percentage": 27.0, "elapsed_time": "4:45:47", "remaining_time": "12:52:36"}
|
||||
{"current_steps": 310, "total_steps": 1111, "loss": 0.8645, "lr": 4.237869534042848e-06, "epoch": 0.27889454300092775, "percentage": 27.9, "elapsed_time": "4:55:26", "remaining_time": "12:43:22"}
|
||||
{"current_steps": 320, "total_steps": 1111, "loss": 0.8739, "lr": 4.184714570485619e-06, "epoch": 0.287891141162248, "percentage": 28.8, "elapsed_time": "5:04:52", "remaining_time": "12:33:35"}
|
||||
{"current_steps": 330, "total_steps": 1111, "loss": 0.8388, "lr": 4.130126218559396e-06, "epoch": 0.29688773932356827, "percentage": 29.7, "elapsed_time": "5:14:23", "remaining_time": "12:24:03"}
|
||||
{"current_steps": 340, "total_steps": 1111, "loss": 0.8344, "lr": 4.074150923110149e-06, "epoch": 0.3058843374848885, "percentage": 30.6, "elapsed_time": "5:23:57", "remaining_time": "12:14:37"}
|
||||
{"current_steps": 350, "total_steps": 1111, "loss": 0.8489, "lr": 4.0168363090227425e-06, "epoch": 0.3148809356462088, "percentage": 31.5, "elapsed_time": "5:33:35", "remaining_time": "12:05:19"}
|
||||
{"current_steps": 360, "total_steps": 1111, "loss": 0.8827, "lr": 3.958231140700742e-06, "epoch": 0.32387753380752904, "percentage": 32.4, "elapsed_time": "5:43:11", "remaining_time": "11:55:55"}
|
||||
{"current_steps": 370, "total_steps": 1111, "loss": 0.8257, "lr": 3.898385280576696e-06, "epoch": 0.3328741319688493, "percentage": 33.3, "elapsed_time": "5:52:40", "remaining_time": "11:46:17"}
|
||||
{"current_steps": 380, "total_steps": 1111, "loss": 0.8586, "lr": 3.8373496466881986e-06, "epoch": 0.34187073013016955, "percentage": 34.2, "elapsed_time": "6:02:11", "remaining_time": "11:36:44"}
|
||||
{"current_steps": 390, "total_steps": 1111, "loss": 0.8633, "lr": 3.775176169355816e-06, "epoch": 0.35086732829148976, "percentage": 35.1, "elapsed_time": "6:11:39", "remaining_time": "11:27:05"}
|
||||
{"current_steps": 400, "total_steps": 1111, "loss": 0.8385, "lr": 3.7119177469997506e-06, "epoch": 0.35986392645281, "percentage": 36.0, "elapsed_time": "6:21:02", "remaining_time": "11:17:17"}
|
||||
{"current_steps": 410, "total_steps": 1111, "loss": 0.8167, "lr": 3.647628201132818e-06, "epoch": 0.36886052461413027, "percentage": 36.9, "elapsed_time": "6:30:52", "remaining_time": "11:08:18"}
|
||||
{"current_steps": 420, "total_steps": 1111, "loss": 0.8331, "lr": 3.582362230568044e-06, "epoch": 0.3778571227754505, "percentage": 37.8, "elapsed_time": "6:40:24", "remaining_time": "10:58:46"}
|
||||
{"current_steps": 430, "total_steps": 1111, "loss": 0.8337, "lr": 3.5161753648798367e-06, "epoch": 0.3868537209367708, "percentage": 38.7, "elapsed_time": "6:50:05", "remaining_time": "10:49:27"}
|
||||
{"current_steps": 440, "total_steps": 1111, "loss": 0.8371, "lr": 3.449123917158331e-06, "epoch": 0.39585031909809104, "percentage": 39.6, "elapsed_time": "6:59:37", "remaining_time": "10:39:55"}
|
||||
{"current_steps": 450, "total_steps": 1111, "loss": 0.8368, "lr": 3.3812649360970988e-06, "epoch": 0.4048469172594113, "percentage": 40.5, "elapsed_time": "7:09:12", "remaining_time": "10:30:27"}
|
||||
{"current_steps": 460, "total_steps": 1111, "loss": 0.8322, "lr": 3.3126561574549975e-06, "epoch": 0.41384351542073156, "percentage": 41.4, "elapsed_time": "7:18:48", "remaining_time": "10:21:00"}
|
||||
{"current_steps": 470, "total_steps": 1111, "loss": 0.8295, "lr": 3.2433559549334475e-06, "epoch": 0.4228401135820518, "percentage": 42.3, "elapsed_time": "7:28:15", "remaining_time": "10:11:20"}
|
||||
{"current_steps": 480, "total_steps": 1111, "loss": 0.8343, "lr": 3.173423290510937e-06, "epoch": 0.431836711743372, "percentage": 43.2, "elapsed_time": "7:37:47", "remaining_time": "10:01:48"}
|
||||
{"current_steps": 490, "total_steps": 1111, "loss": 0.8548, "lr": 3.102917664277007e-06, "epoch": 0.44083330990469227, "percentage": 44.1, "elapsed_time": "7:47:22", "remaining_time": "9:52:19"}
|
||||
{"current_steps": 500, "total_steps": 1111, "loss": 0.8558, "lr": 3.0318990638084055e-06, "epoch": 0.44982990806601253, "percentage": 45.0, "elapsed_time": "7:56:45", "remaining_time": "9:42:35"}
|
||||
{"current_steps": 510, "total_steps": 1111, "loss": 0.8441, "lr": 2.9604279131304685e-06, "epoch": 0.4588265062273328, "percentage": 45.9, "elapsed_time": "8:06:05", "remaining_time": "9:32:49"}
|
||||
{"current_steps": 520, "total_steps": 1111, "loss": 0.8346, "lr": 2.8885650213071746e-06, "epoch": 0.46782310438865304, "percentage": 46.8, "elapsed_time": "8:15:32", "remaining_time": "9:23:11"}
|
||||
{"current_steps": 530, "total_steps": 1111, "loss": 0.8363, "lr": 2.8163715307035897e-06, "epoch": 0.4768197025499733, "percentage": 47.7, "elapsed_time": "8:25:07", "remaining_time": "9:13:44"}
|
||||
{"current_steps": 540, "total_steps": 1111, "loss": 0.8637, "lr": 2.743908864964741e-06, "epoch": 0.48581630071129356, "percentage": 48.6, "elapsed_time": "8:34:33", "remaining_time": "9:04:05"}
|
||||
{"current_steps": 550, "total_steps": 1111, "loss": 0.8439, "lr": 2.6712386767551663e-06, "epoch": 0.4948128988726138, "percentage": 49.5, "elapsed_time": "8:44:05", "remaining_time": "8:54:34"}
|
||||
{"current_steps": 560, "total_steps": 1111, "loss": 0.8485, "lr": 2.5984227953036124e-06, "epoch": 0.5038094970339341, "percentage": 50.41, "elapsed_time": "8:53:31", "remaining_time": "8:44:56"}
|
||||
{"current_steps": 570, "total_steps": 1111, "loss": 0.8279, "lr": 2.52552317379751e-06, "epoch": 0.5128060951952543, "percentage": 51.31, "elapsed_time": "9:03:08", "remaining_time": "8:35:30"}
|
||||
{"current_steps": 580, "total_steps": 1111, "loss": 0.8744, "lr": 2.452601836671977e-06, "epoch": 0.5218026933565746, "percentage": 52.21, "elapsed_time": "9:12:39", "remaining_time": "8:25:57"}
|
||||
{"current_steps": 590, "total_steps": 1111, "loss": 0.817, "lr": 2.3797208268382096e-06, "epoch": 0.5307992915178948, "percentage": 53.11, "elapsed_time": "9:22:10", "remaining_time": "8:16:26"}
|
||||
{"current_steps": 600, "total_steps": 1111, "loss": 0.8454, "lr": 2.3069421528961493e-06, "epoch": 0.5397958896792151, "percentage": 54.01, "elapsed_time": "9:31:40", "remaining_time": "8:06:52"}
|
||||
{"current_steps": 610, "total_steps": 1111, "loss": 0.821, "lr": 2.2343277363763437e-06, "epoch": 0.5487924878405352, "percentage": 54.91, "elapsed_time": "9:42:43", "remaining_time": "7:58:35"}
|
||||
{"current_steps": 620, "total_steps": 1111, "loss": 0.8444, "lr": 2.1619393590558857e-06, "epoch": 0.5577890860018555, "percentage": 55.81, "elapsed_time": "9:52:14", "remaining_time": "7:49:01"}
|
||||
{"current_steps": 630, "total_steps": 1111, "loss": 0.8296, "lr": 2.0898386103932634e-06, "epoch": 0.5667856841631758, "percentage": 56.71, "elapsed_time": "10:01:44", "remaining_time": "7:39:25"}
|
||||
{"current_steps": 640, "total_steps": 1111, "loss": 0.8425, "lr": 2.018086835126831e-06, "epoch": 0.575782282324496, "percentage": 57.61, "elapsed_time": "10:11:16", "remaining_time": "7:29:51"}
|
||||
{"current_steps": 650, "total_steps": 1111, "loss": 0.8356, "lr": 1.9467450810814984e-06, "epoch": 0.5847788804858163, "percentage": 58.51, "elapsed_time": "10:20:53", "remaining_time": "7:20:21"}
|
||||
{"current_steps": 660, "total_steps": 1111, "loss": 0.8123, "lr": 1.8758740472280372e-06, "epoch": 0.5937754786471365, "percentage": 59.41, "elapsed_time": "10:30:33", "remaining_time": "7:10:52"}
|
||||
{"current_steps": 670, "total_steps": 1111, "loss": 0.8124, "lr": 1.8055340320392002e-06, "epoch": 0.6027720768084568, "percentage": 60.31, "elapsed_time": "10:40:13", "remaining_time": "7:01:24"}
|
||||
{"current_steps": 680, "total_steps": 1111, "loss": 0.8469, "lr": 1.7357848821865914e-06, "epoch": 0.611768674969777, "percentage": 61.21, "elapsed_time": "10:49:55", "remaining_time": "6:51:56"}
|
||||
{"current_steps": 690, "total_steps": 1111, "loss": 0.8171, "lr": 1.6666859416219384e-06, "epoch": 0.6207652731310973, "percentage": 62.11, "elapsed_time": "10:59:27", "remaining_time": "6:42:21"}
|
||||
{"current_steps": 700, "total_steps": 1111, "loss": 0.8105, "lr": 1.5982960010860882e-06, "epoch": 0.6297618712924176, "percentage": 63.01, "elapsed_time": "11:09:01", "remaining_time": "6:32:48"}
|
||||
{"current_steps": 710, "total_steps": 1111, "loss": 0.8342, "lr": 1.530673248088687e-06, "epoch": 0.6387584694537378, "percentage": 63.91, "elapsed_time": "11:18:33", "remaining_time": "6:23:14"}
|
||||
{"current_steps": 720, "total_steps": 1111, "loss": 0.8715, "lr": 1.463875217401099e-06, "epoch": 0.6477550676150581, "percentage": 64.81, "elapsed_time": "11:28:05", "remaining_time": "6:13:40"}
|
||||
{"current_steps": 730, "total_steps": 1111, "loss": 0.8355, "lr": 1.397958742104687e-06, "epoch": 0.6567516657763783, "percentage": 65.71, "elapsed_time": "11:37:35", "remaining_time": "6:04:04"}
|
||||
{"current_steps": 740, "total_steps": 1111, "loss": 0.8148, "lr": 1.3329799052361094e-06, "epoch": 0.6657482639376986, "percentage": 66.61, "elapsed_time": "11:47:05", "remaining_time": "5:54:29"}
|
||||
{"current_steps": 750, "total_steps": 1111, "loss": 0.8229, "lr": 1.2689939920707667e-06, "epoch": 0.6747448620990188, "percentage": 67.51, "elapsed_time": "11:56:45", "remaining_time": "5:45:00"}
|
||||
{"current_steps": 760, "total_steps": 1111, "loss": 0.8409, "lr": 1.2060554430849951e-06, "epoch": 0.6837414602603391, "percentage": 68.41, "elapsed_time": "12:06:22", "remaining_time": "5:35:28"}
|
||||
{"current_steps": 770, "total_steps": 1111, "loss": 0.8258, "lr": 1.1442178076370426e-06, "epoch": 0.6927380584216594, "percentage": 69.31, "elapsed_time": "12:15:57", "remaining_time": "5:25:55"}
|
||||
{"current_steps": 780, "total_steps": 1111, "loss": 0.8162, "lr": 1.0835336984062122e-06, "epoch": 0.7017346565829795, "percentage": 70.21, "elapsed_time": "12:25:41", "remaining_time": "5:16:26"}
|
||||
{"current_steps": 790, "total_steps": 1111, "loss": 0.808, "lr": 1.0240547466289646e-06, "epoch": 0.7107312547442998, "percentage": 71.11, "elapsed_time": "12:35:10", "remaining_time": "5:06:51"}
|
||||
{"current_steps": 800, "total_steps": 1111, "loss": 0.8314, "lr": 9.65831558170037e-07, "epoch": 0.71972785290562, "percentage": 72.01, "elapsed_time": "12:44:58", "remaining_time": "4:57:23"}
|
||||
{"current_steps": 810, "total_steps": 1111, "loss": 0.8437, "lr": 9.089136704659823e-07, "epoch": 0.7287244510669403, "percentage": 72.91, "elapsed_time": "12:54:27", "remaining_time": "4:47:47"}
|
||||
{"current_steps": 820, "total_steps": 1111, "loss": 0.8052, "lr": 8.533495103777367e-07, "epoch": 0.7377210492282605, "percentage": 73.81, "elapsed_time": "13:04:01", "remaining_time": "4:38:13"}
|
||||
{"current_steps": 830, "total_steps": 1111, "loss": 0.7951, "lr": 7.991863529880936e-07, "epoch": 0.7467176473895808, "percentage": 74.71, "elapsed_time": "13:13:30", "remaining_time": "4:28:38"}
|
||||
{"current_steps": 840, "total_steps": 1111, "loss": 0.8461, "lr": 7.464702813791308e-07, "epoch": 0.755714245550901, "percentage": 75.61, "elapsed_time": "13:23:03", "remaining_time": "4:19:04"}
|
||||
{"current_steps": 850, "total_steps": 1111, "loss": 0.8045, "lr": 6.952461474238182e-07, "epoch": 0.7647108437122213, "percentage": 76.51, "elapsed_time": "13:32:40", "remaining_time": "4:09:32"}
|
||||
{"current_steps": 860, "total_steps": 1111, "loss": 0.8562, "lr": 6.455575336251549e-07, "epoch": 0.7737074418735416, "percentage": 77.41, "elapsed_time": "13:42:03", "remaining_time": "3:59:55"}
|
||||
{"current_steps": 870, "total_steps": 1111, "loss": 0.8345, "lr": 5.974467160353226e-07, "epoch": 0.7827040400348618, "percentage": 78.31, "elapsed_time": "13:51:29", "remaining_time": "3:50:19"}
|
||||
{"current_steps": 880, "total_steps": 1111, "loss": 0.8344, "lr": 5.509546282863806e-07, "epoch": 0.7917006381961821, "percentage": 79.21, "elapsed_time": "14:00:56", "remaining_time": "3:40:44"}
|
||||
{"current_steps": 890, "total_steps": 1111, "loss": 0.8256, "lr": 5.061208267631315e-07, "epoch": 0.8006972363575023, "percentage": 80.11, "elapsed_time": "14:10:26", "remaining_time": "3:31:10"}
|
||||
{"current_steps": 900, "total_steps": 1111, "loss": 0.8427, "lr": 4.629834569477626e-07, "epoch": 0.8096938345188226, "percentage": 81.01, "elapsed_time": "14:19:56", "remaining_time": "3:21:36"}
|
||||
{"current_steps": 910, "total_steps": 1111, "loss": 0.8195, "lr": 4.21579220964923e-07, "epoch": 0.8186904326801429, "percentage": 81.91, "elapsed_time": "14:29:37", "remaining_time": "3:12:04"}
|
||||
{"current_steps": 920, "total_steps": 1111, "loss": 0.8277, "lr": 3.8194334635482777e-07, "epoch": 0.8276870308414631, "percentage": 82.81, "elapsed_time": "14:39:09", "remaining_time": "3:02:31"}
|
||||
{"current_steps": 930, "total_steps": 1111, "loss": 0.811, "lr": 3.4410955610097745e-07, "epoch": 0.8366836290027834, "percentage": 83.71, "elapsed_time": "14:48:59", "remaining_time": "2:53:01"}
|
||||
{"current_steps": 940, "total_steps": 1111, "loss": 0.8187, "lr": 3.0811003993797327e-07, "epoch": 0.8456802271641036, "percentage": 84.61, "elapsed_time": "14:58:35", "remaining_time": "2:43:28"}
|
||||
{"current_steps": 950, "total_steps": 1111, "loss": 0.8352, "lr": 2.7397542696386145e-07, "epoch": 0.8546768253254238, "percentage": 85.51, "elapsed_time": "15:08:06", "remaining_time": "2:33:53"}
|
||||
{"current_steps": 960, "total_steps": 1111, "loss": 0.8076, "lr": 2.4173475958028855e-07, "epoch": 0.863673423486744, "percentage": 86.41, "elapsed_time": "15:17:39", "remaining_time": "2:24:20"}
|
||||
{"current_steps": 970, "total_steps": 1111, "loss": 0.8357, "lr": 2.1141546878265696e-07, "epoch": 0.8726700216480643, "percentage": 87.31, "elapsed_time": "15:27:13", "remaining_time": "2:14:46"}
|
||||
{"current_steps": 980, "total_steps": 1111, "loss": 0.8375, "lr": 1.8304335082129032e-07, "epoch": 0.8816666198093845, "percentage": 88.21, "elapsed_time": "15:36:42", "remaining_time": "2:05:12"}
|
||||
{"current_steps": 990, "total_steps": 1111, "loss": 0.8482, "lr": 1.566425452534784e-07, "epoch": 0.8906632179707048, "percentage": 89.11, "elapsed_time": "15:46:15", "remaining_time": "1:55:39"}
|
||||
{"current_steps": 1000, "total_steps": 1111, "loss": 0.8334, "lr": 1.3223551440506244e-07, "epoch": 0.8996598161320251, "percentage": 90.01, "elapsed_time": "15:55:56", "remaining_time": "1:46:06"}
|
||||
{"current_steps": 1010, "total_steps": 1111, "loss": 0.8218, "lr": 1.0984302425904869e-07, "epoch": 0.9086564142933453, "percentage": 90.91, "elapsed_time": "16:05:38", "remaining_time": "1:36:33"}
|
||||
{"current_steps": 1020, "total_steps": 1111, "loss": 0.8242, "lr": 8.94841267874974e-08, "epoch": 0.9176530124546656, "percentage": 91.81, "elapsed_time": "16:15:10", "remaining_time": "1:27:00"}
|
||||
{"current_steps": 1030, "total_steps": 1111, "loss": 0.8276, "lr": 7.117614374173353e-08, "epoch": 0.9266496106159858, "percentage": 92.71, "elapsed_time": "16:24:46", "remaining_time": "1:17:26"}
|
||||
{"current_steps": 1040, "total_steps": 1111, "loss": 0.8399, "lr": 5.493465191465458e-08, "epoch": 0.9356462087773061, "percentage": 93.61, "elapsed_time": "16:34:25", "remaining_time": "1:07:53"}
|
||||
{"current_steps": 1050, "total_steps": 1111, "loss": 0.8231, "lr": 4.0773469887692154e-08, "epoch": 0.9446428069386263, "percentage": 94.51, "elapsed_time": "16:44:05", "remaining_time": "0:58:20"}
|
||||
{"current_steps": 1060, "total_steps": 1111, "loss": 0.8089, "lr": 2.8704646273687298e-08, "epoch": 0.9536394050999466, "percentage": 95.41, "elapsed_time": "16:53:30", "remaining_time": "0:48:45"}
|
||||
{"current_steps": 1070, "total_steps": 1111, "loss": 0.8546, "lr": 1.873844946569614e-08, "epoch": 0.9626360032612669, "percentage": 96.31, "elapsed_time": "17:02:54", "remaining_time": "0:39:11"}
|
||||
{"current_steps": 1080, "total_steps": 1111, "loss": 0.8213, "lr": 1.0883358900435626e-08, "epoch": 0.9716326014225871, "percentage": 97.21, "elapsed_time": "17:12:27", "remaining_time": "0:29:38"}
|
||||
{"current_steps": 1090, "total_steps": 1111, "loss": 0.8261, "lr": 5.146057843814223e-09, "epoch": 0.9806291995839074, "percentage": 98.11, "elapsed_time": "17:22:11", "remaining_time": "0:20:04"}
|
||||
{"current_steps": 1100, "total_steps": 1111, "loss": 0.8431, "lr": 1.531427704675459e-09, "epoch": 0.9896257977452276, "percentage": 99.01, "elapsed_time": "17:31:46", "remaining_time": "0:10:31"}
|
||||
{"current_steps": 1110, "total_steps": 1111, "loss": 0.8082, "lr": 4.25438816009649e-11, "epoch": 0.9986223959065479, "percentage": 99.91, "elapsed_time": "17:41:19", "remaining_time": "0:00:57"}
|
||||
{"current_steps": 1111, "total_steps": 1111, "epoch": 0.9995220557226798, "percentage": 100.0, "elapsed_time": "17:43:58", "remaining_time": "0:00:00"}
|
||||
811
trainer_state.json
Normal file
811
trainer_state.json
Normal file
@@ -0,0 +1,811 @@
|
||||
{
|
||||
"best_global_step": null,
|
||||
"best_metric": null,
|
||||
"best_model_checkpoint": null,
|
||||
"epoch": 0.9995220557226798,
|
||||
"eval_steps": 500,
|
||||
"global_step": 1111,
|
||||
"is_hyper_param_search": false,
|
||||
"is_local_process_zero": true,
|
||||
"is_world_process_zero": true,
|
||||
"log_history": [
|
||||
{
|
||||
"epoch": 0.00899659816132025,
|
||||
"grad_norm": 9.407082207126406,
|
||||
"learning_rate": 1.323529411764706e-06,
|
||||
"loss": 1.1112,
|
||||
"step": 10
|
||||
},
|
||||
{
|
||||
"epoch": 0.0179931963226405,
|
||||
"grad_norm": 1.6078560053885997,
|
||||
"learning_rate": 2.7941176470588237e-06,
|
||||
"loss": 0.9362,
|
||||
"step": 20
|
||||
},
|
||||
{
|
||||
"epoch": 0.02698979448396075,
|
||||
"grad_norm": 1.624171273563677,
|
||||
"learning_rate": 4.264705882352942e-06,
|
||||
"loss": 0.8806,
|
||||
"step": 30
|
||||
},
|
||||
{
|
||||
"epoch": 0.035986392645281,
|
||||
"grad_norm": 1.4554360159842772,
|
||||
"learning_rate": 4.9997341046993195e-06,
|
||||
"loss": 0.8947,
|
||||
"step": 40
|
||||
},
|
||||
{
|
||||
"epoch": 0.04498299080660125,
|
||||
"grad_norm": 1.4128774633791414,
|
||||
"learning_rate": 4.997607281643338e-06,
|
||||
"loss": 0.8764,
|
||||
"step": 50
|
||||
},
|
||||
{
|
||||
"epoch": 0.0539795889679215,
|
||||
"grad_norm": 1.4582318570520234,
|
||||
"learning_rate": 4.993355445074358e-06,
|
||||
"loss": 0.8758,
|
||||
"step": 60
|
||||
},
|
||||
{
|
||||
"epoch": 0.06297618712924176,
|
||||
"grad_norm": 1.241568669359911,
|
||||
"learning_rate": 4.986982212538754e-06,
|
||||
"loss": 0.8839,
|
||||
"step": 70
|
||||
},
|
||||
{
|
||||
"epoch": 0.071972785290562,
|
||||
"grad_norm": 1.4053494360834542,
|
||||
"learning_rate": 4.978493006508408e-06,
|
||||
"loss": 0.8741,
|
||||
"step": 80
|
||||
},
|
||||
{
|
||||
"epoch": 0.08096938345188226,
|
||||
"grad_norm": 1.4737910013351594,
|
||||
"learning_rate": 4.967895049767168e-06,
|
||||
"loss": 0.8859,
|
||||
"step": 90
|
||||
},
|
||||
{
|
||||
"epoch": 0.0899659816132025,
|
||||
"grad_norm": 1.3138356543765093,
|
||||
"learning_rate": 4.9551973592655565e-06,
|
||||
"loss": 0.8506,
|
||||
"step": 100
|
||||
},
|
||||
{
|
||||
"epoch": 0.09896257977452276,
|
||||
"grad_norm": 1.3758428201688702,
|
||||
"learning_rate": 4.940410738448974e-06,
|
||||
"loss": 0.9021,
|
||||
"step": 110
|
||||
},
|
||||
{
|
||||
"epoch": 0.107959177935843,
|
||||
"grad_norm": 1.4334286224262116,
|
||||
"learning_rate": 4.923547768065916e-06,
|
||||
"loss": 0.8752,
|
||||
"step": 120
|
||||
},
|
||||
{
|
||||
"epoch": 0.11695577609716326,
|
||||
"grad_norm": 1.3787955824383673,
|
||||
"learning_rate": 4.904622795464018e-06,
|
||||
"loss": 0.8824,
|
||||
"step": 130
|
||||
},
|
||||
{
|
||||
"epoch": 0.12595237425848352,
|
||||
"grad_norm": 1.3627983383165039,
|
||||
"learning_rate": 4.883651922383059e-06,
|
||||
"loss": 0.8519,
|
||||
"step": 140
|
||||
},
|
||||
{
|
||||
"epoch": 0.13494897241980378,
|
||||
"grad_norm": 1.3424633268190644,
|
||||
"learning_rate": 4.860652991255274e-06,
|
||||
"loss": 0.8565,
|
||||
"step": 150
|
||||
},
|
||||
{
|
||||
"epoch": 0.143945570581124,
|
||||
"grad_norm": 1.4227597124829214,
|
||||
"learning_rate": 4.835645570024666e-06,
|
||||
"loss": 0.8884,
|
||||
"step": 160
|
||||
},
|
||||
{
|
||||
"epoch": 0.15294216874244426,
|
||||
"grad_norm": 1.3745439024877604,
|
||||
"learning_rate": 4.808650935498216e-06,
|
||||
"loss": 0.8394,
|
||||
"step": 170
|
||||
},
|
||||
{
|
||||
"epoch": 0.16193876690376452,
|
||||
"grad_norm": 1.1913982454389247,
|
||||
"learning_rate": 4.779692055243149e-06,
|
||||
"loss": 0.8542,
|
||||
"step": 180
|
||||
},
|
||||
{
|
||||
"epoch": 0.17093536506508478,
|
||||
"grad_norm": 1.3183554482150048,
|
||||
"learning_rate": 4.748793568045682e-06,
|
||||
"loss": 0.8633,
|
||||
"step": 190
|
||||
},
|
||||
{
|
||||
"epoch": 0.179931963226405,
|
||||
"grad_norm": 1.4397694519607327,
|
||||
"learning_rate": 4.715981762947854e-06,
|
||||
"loss": 0.8532,
|
||||
"step": 200
|
||||
},
|
||||
{
|
||||
"epoch": 0.18892856138772526,
|
||||
"grad_norm": 1.2038269967391835,
|
||||
"learning_rate": 4.681284556880294e-06,
|
||||
"loss": 0.8544,
|
||||
"step": 210
|
||||
},
|
||||
{
|
||||
"epoch": 0.19792515954904552,
|
||||
"grad_norm": 1.2701132880820947,
|
||||
"learning_rate": 4.6447314709099436e-06,
|
||||
"loss": 0.8463,
|
||||
"step": 220
|
||||
},
|
||||
{
|
||||
"epoch": 0.20692175771036578,
|
||||
"grad_norm": 1.3356581457961003,
|
||||
"learning_rate": 4.606353605122954e-06,
|
||||
"loss": 0.8652,
|
||||
"step": 230
|
||||
},
|
||||
{
|
||||
"epoch": 0.215918355871686,
|
||||
"grad_norm": 1.3140257601689191,
|
||||
"learning_rate": 4.566183612164116e-06,
|
||||
"loss": 0.8737,
|
||||
"step": 240
|
||||
},
|
||||
{
|
||||
"epoch": 0.22491495403300626,
|
||||
"grad_norm": 1.2150684493886468,
|
||||
"learning_rate": 4.52425566945535e-06,
|
||||
"loss": 0.8448,
|
||||
"step": 250
|
||||
},
|
||||
{
|
||||
"epoch": 0.23391155219432652,
|
||||
"grad_norm": 1.3356045800197514,
|
||||
"learning_rate": 4.480605450116879e-06,
|
||||
"loss": 0.8531,
|
||||
"step": 260
|
||||
},
|
||||
{
|
||||
"epoch": 0.24290815035564678,
|
||||
"grad_norm": 1.3621827029728837,
|
||||
"learning_rate": 4.435270092615835e-06,
|
||||
"loss": 0.8569,
|
||||
"step": 270
|
||||
},
|
||||
{
|
||||
"epoch": 0.25190474851696704,
|
||||
"grad_norm": 1.2958362488368202,
|
||||
"learning_rate": 4.388288169168121e-06,
|
||||
"loss": 0.8559,
|
||||
"step": 280
|
||||
},
|
||||
{
|
||||
"epoch": 0.2609013466782873,
|
||||
"grad_norm": 1.315904667574477,
|
||||
"learning_rate": 4.339699652920407e-06,
|
||||
"loss": 0.8487,
|
||||
"step": 290
|
||||
},
|
||||
{
|
||||
"epoch": 0.26989794483960755,
|
||||
"grad_norm": 1.192500405781859,
|
||||
"learning_rate": 4.28954588394019e-06,
|
||||
"loss": 0.8427,
|
||||
"step": 300
|
||||
},
|
||||
{
|
||||
"epoch": 0.27889454300092775,
|
||||
"grad_norm": 1.3463647117462414,
|
||||
"learning_rate": 4.237869534042848e-06,
|
||||
"loss": 0.8645,
|
||||
"step": 310
|
||||
},
|
||||
{
|
||||
"epoch": 0.287891141162248,
|
||||
"grad_norm": 1.302446463869839,
|
||||
"learning_rate": 4.184714570485619e-06,
|
||||
"loss": 0.8739,
|
||||
"step": 320
|
||||
},
|
||||
{
|
||||
"epoch": 0.29688773932356827,
|
||||
"grad_norm": 1.2998957392856052,
|
||||
"learning_rate": 4.130126218559396e-06,
|
||||
"loss": 0.8388,
|
||||
"step": 330
|
||||
},
|
||||
{
|
||||
"epoch": 0.3058843374848885,
|
||||
"grad_norm": 1.1895515530331358,
|
||||
"learning_rate": 4.074150923110149e-06,
|
||||
"loss": 0.8344,
|
||||
"step": 340
|
||||
},
|
||||
{
|
||||
"epoch": 0.3148809356462088,
|
||||
"grad_norm": 1.2291112546963492,
|
||||
"learning_rate": 4.0168363090227425e-06,
|
||||
"loss": 0.8489,
|
||||
"step": 350
|
||||
},
|
||||
{
|
||||
"epoch": 0.32387753380752904,
|
||||
"grad_norm": 1.2187350361306888,
|
||||
"learning_rate": 3.958231140700742e-06,
|
||||
"loss": 0.8827,
|
||||
"step": 360
|
||||
},
|
||||
{
|
||||
"epoch": 0.3328741319688493,
|
||||
"grad_norm": 1.2251085843679796,
|
||||
"learning_rate": 3.898385280576696e-06,
|
||||
"loss": 0.8257,
|
||||
"step": 370
|
||||
},
|
||||
{
|
||||
"epoch": 0.34187073013016955,
|
||||
"grad_norm": 1.2708814495416274,
|
||||
"learning_rate": 3.8373496466881986e-06,
|
||||
"loss": 0.8586,
|
||||
"step": 380
|
||||
},
|
||||
{
|
||||
"epoch": 0.35086732829148976,
|
||||
"grad_norm": 1.3212728062854284,
|
||||
"learning_rate": 3.775176169355816e-06,
|
||||
"loss": 0.8633,
|
||||
"step": 390
|
||||
},
|
||||
{
|
||||
"epoch": 0.35986392645281,
|
||||
"grad_norm": 1.3153556245818046,
|
||||
"learning_rate": 3.7119177469997506e-06,
|
||||
"loss": 0.8385,
|
||||
"step": 400
|
||||
},
|
||||
{
|
||||
"epoch": 0.36886052461413027,
|
||||
"grad_norm": 1.2072741564966885,
|
||||
"learning_rate": 3.647628201132818e-06,
|
||||
"loss": 0.8167,
|
||||
"step": 410
|
||||
},
|
||||
{
|
||||
"epoch": 0.3778571227754505,
|
||||
"grad_norm": 1.2379563543143086,
|
||||
"learning_rate": 3.582362230568044e-06,
|
||||
"loss": 0.8331,
|
||||
"step": 420
|
||||
},
|
||||
{
|
||||
"epoch": 0.3868537209367708,
|
||||
"grad_norm": 1.2879168901856428,
|
||||
"learning_rate": 3.5161753648798367e-06,
|
||||
"loss": 0.8337,
|
||||
"step": 430
|
||||
},
|
||||
{
|
||||
"epoch": 0.39585031909809104,
|
||||
"grad_norm": 1.29505065565257,
|
||||
"learning_rate": 3.449123917158331e-06,
|
||||
"loss": 0.8371,
|
||||
"step": 440
|
||||
},
|
||||
{
|
||||
"epoch": 0.4048469172594113,
|
||||
"grad_norm": 1.2295083553559263,
|
||||
"learning_rate": 3.3812649360970988e-06,
|
||||
"loss": 0.8368,
|
||||
"step": 450
|
||||
},
|
||||
{
|
||||
"epoch": 0.41384351542073156,
|
||||
"grad_norm": 1.3554176223589234,
|
||||
"learning_rate": 3.3126561574549975e-06,
|
||||
"loss": 0.8322,
|
||||
"step": 460
|
||||
},
|
||||
{
|
||||
"epoch": 0.4228401135820518,
|
||||
"grad_norm": 1.3079414538272678,
|
||||
"learning_rate": 3.2433559549334475e-06,
|
||||
"loss": 0.8295,
|
||||
"step": 470
|
||||
},
|
||||
{
|
||||
"epoch": 0.431836711743372,
|
||||
"grad_norm": 1.2635231121274326,
|
||||
"learning_rate": 3.173423290510937e-06,
|
||||
"loss": 0.8343,
|
||||
"step": 480
|
||||
},
|
||||
{
|
||||
"epoch": 0.44083330990469227,
|
||||
"grad_norm": 1.3150644499613324,
|
||||
"learning_rate": 3.102917664277007e-06,
|
||||
"loss": 0.8548,
|
||||
"step": 490
|
||||
},
|
||||
{
|
||||
"epoch": 0.44982990806601253,
|
||||
"grad_norm": 1.366414847286227,
|
||||
"learning_rate": 3.0318990638084055e-06,
|
||||
"loss": 0.8558,
|
||||
"step": 500
|
||||
},
|
||||
{
|
||||
"epoch": 0.4588265062273328,
|
||||
"grad_norm": 1.2798875567653052,
|
||||
"learning_rate": 2.9604279131304685e-06,
|
||||
"loss": 0.8441,
|
||||
"step": 510
|
||||
},
|
||||
{
|
||||
"epoch": 0.46782310438865304,
|
||||
"grad_norm": 1.2205525459157436,
|
||||
"learning_rate": 2.8885650213071746e-06,
|
||||
"loss": 0.8346,
|
||||
"step": 520
|
||||
},
|
||||
{
|
||||
"epoch": 0.4768197025499733,
|
||||
"grad_norm": 1.3131391162470558,
|
||||
"learning_rate": 2.8163715307035897e-06,
|
||||
"loss": 0.8363,
|
||||
"step": 530
|
||||
},
|
||||
{
|
||||
"epoch": 0.48581630071129356,
|
||||
"grad_norm": 1.2817420417534302,
|
||||
"learning_rate": 2.743908864964741e-06,
|
||||
"loss": 0.8637,
|
||||
"step": 540
|
||||
},
|
||||
{
|
||||
"epoch": 0.4948128988726138,
|
||||
"grad_norm": 1.4117777334581971,
|
||||
"learning_rate": 2.6712386767551663e-06,
|
||||
"loss": 0.8439,
|
||||
"step": 550
|
||||
},
|
||||
{
|
||||
"epoch": 0.5038094970339341,
|
||||
"grad_norm": 1.3316023582639032,
|
||||
"learning_rate": 2.5984227953036124e-06,
|
||||
"loss": 0.8485,
|
||||
"step": 560
|
||||
},
|
||||
{
|
||||
"epoch": 0.5128060951952543,
|
||||
"grad_norm": 1.220300819321901,
|
||||
"learning_rate": 2.52552317379751e-06,
|
||||
"loss": 0.8279,
|
||||
"step": 570
|
||||
},
|
||||
{
|
||||
"epoch": 0.5218026933565746,
|
||||
"grad_norm": 1.2405242839005477,
|
||||
"learning_rate": 2.452601836671977e-06,
|
||||
"loss": 0.8744,
|
||||
"step": 580
|
||||
},
|
||||
{
|
||||
"epoch": 0.5307992915178948,
|
||||
"grad_norm": 1.160142751072927,
|
||||
"learning_rate": 2.3797208268382096e-06,
|
||||
"loss": 0.817,
|
||||
"step": 590
|
||||
},
|
||||
{
|
||||
"epoch": 0.5397958896792151,
|
||||
"grad_norm": 1.2558513559088742,
|
||||
"learning_rate": 2.3069421528961493e-06,
|
||||
"loss": 0.8454,
|
||||
"step": 600
|
||||
},
|
||||
{
|
||||
"epoch": 0.5487924878405352,
|
||||
"grad_norm": 1.2495369982031077,
|
||||
"learning_rate": 2.2343277363763437e-06,
|
||||
"loss": 0.821,
|
||||
"step": 610
|
||||
},
|
||||
{
|
||||
"epoch": 0.5577890860018555,
|
||||
"grad_norm": 1.2645404439096872,
|
||||
"learning_rate": 2.1619393590558857e-06,
|
||||
"loss": 0.8444,
|
||||
"step": 620
|
||||
},
|
||||
{
|
||||
"epoch": 0.5667856841631758,
|
||||
"grad_norm": 1.2701388082173177,
|
||||
"learning_rate": 2.0898386103932634e-06,
|
||||
"loss": 0.8296,
|
||||
"step": 630
|
||||
},
|
||||
{
|
||||
"epoch": 0.575782282324496,
|
||||
"grad_norm": 1.2620244980098876,
|
||||
"learning_rate": 2.018086835126831e-06,
|
||||
"loss": 0.8425,
|
||||
"step": 640
|
||||
},
|
||||
{
|
||||
"epoch": 0.5847788804858163,
|
||||
"grad_norm": 1.1594661341735868,
|
||||
"learning_rate": 1.9467450810814984e-06,
|
||||
"loss": 0.8356,
|
||||
"step": 650
|
||||
},
|
||||
{
|
||||
"epoch": 0.5937754786471365,
|
||||
"grad_norm": 1.2189222560662132,
|
||||
"learning_rate": 1.8758740472280372e-06,
|
||||
"loss": 0.8123,
|
||||
"step": 660
|
||||
},
|
||||
{
|
||||
"epoch": 0.6027720768084568,
|
||||
"grad_norm": 1.2444702547045237,
|
||||
"learning_rate": 1.8055340320392002e-06,
|
||||
"loss": 0.8124,
|
||||
"step": 670
|
||||
},
|
||||
{
|
||||
"epoch": 0.611768674969777,
|
||||
"grad_norm": 1.2315293692576161,
|
||||
"learning_rate": 1.7357848821865914e-06,
|
||||
"loss": 0.8469,
|
||||
"step": 680
|
||||
},
|
||||
{
|
||||
"epoch": 0.6207652731310973,
|
||||
"grad_norm": 1.2995895579660914,
|
||||
"learning_rate": 1.6666859416219384e-06,
|
||||
"loss": 0.8171,
|
||||
"step": 690
|
||||
},
|
||||
{
|
||||
"epoch": 0.6297618712924176,
|
||||
"grad_norm": 1.2091823608533048,
|
||||
"learning_rate": 1.5982960010860882e-06,
|
||||
"loss": 0.8105,
|
||||
"step": 700
|
||||
},
|
||||
{
|
||||
"epoch": 0.6387584694537378,
|
||||
"grad_norm": 1.2705740309640616,
|
||||
"learning_rate": 1.530673248088687e-06,
|
||||
"loss": 0.8342,
|
||||
"step": 710
|
||||
},
|
||||
{
|
||||
"epoch": 0.6477550676150581,
|
||||
"grad_norm": 1.2981521802148184,
|
||||
"learning_rate": 1.463875217401099e-06,
|
||||
"loss": 0.8715,
|
||||
"step": 720
|
||||
},
|
||||
{
|
||||
"epoch": 0.6567516657763783,
|
||||
"grad_norm": 1.1925093116586027,
|
||||
"learning_rate": 1.397958742104687e-06,
|
||||
"loss": 0.8355,
|
||||
"step": 730
|
||||
},
|
||||
{
|
||||
"epoch": 0.6657482639376986,
|
||||
"grad_norm": 1.1985043813628813,
|
||||
"learning_rate": 1.3329799052361094e-06,
|
||||
"loss": 0.8148,
|
||||
"step": 740
|
||||
},
|
||||
{
|
||||
"epoch": 0.6747448620990188,
|
||||
"grad_norm": 1.1010299588672359,
|
||||
"learning_rate": 1.2689939920707667e-06,
|
||||
"loss": 0.8229,
|
||||
"step": 750
|
||||
},
|
||||
{
|
||||
"epoch": 0.6837414602603391,
|
||||
"grad_norm": 1.2430718303897152,
|
||||
"learning_rate": 1.2060554430849951e-06,
|
||||
"loss": 0.8409,
|
||||
"step": 760
|
||||
},
|
||||
{
|
||||
"epoch": 0.6927380584216594,
|
||||
"grad_norm": 1.269761061806062,
|
||||
"learning_rate": 1.1442178076370426e-06,
|
||||
"loss": 0.8258,
|
||||
"step": 770
|
||||
},
|
||||
{
|
||||
"epoch": 0.7017346565829795,
|
||||
"grad_norm": 1.157246567841852,
|
||||
"learning_rate": 1.0835336984062122e-06,
|
||||
"loss": 0.8162,
|
||||
"step": 780
|
||||
},
|
||||
{
|
||||
"epoch": 0.7107312547442998,
|
||||
"grad_norm": 1.2172574409120116,
|
||||
"learning_rate": 1.0240547466289646e-06,
|
||||
"loss": 0.808,
|
||||
"step": 790
|
||||
},
|
||||
{
|
||||
"epoch": 0.71972785290562,
|
||||
"grad_norm": 1.2870129001272486,
|
||||
"learning_rate": 9.65831558170037e-07,
|
||||
"loss": 0.8314,
|
||||
"step": 800
|
||||
},
|
||||
{
|
||||
"epoch": 0.7287244510669403,
|
||||
"grad_norm": 1.2970054704276492,
|
||||
"learning_rate": 9.089136704659823e-07,
|
||||
"loss": 0.8437,
|
||||
"step": 810
|
||||
},
|
||||
{
|
||||
"epoch": 0.7377210492282605,
|
||||
"grad_norm": 1.1999856118846624,
|
||||
"learning_rate": 8.533495103777367e-07,
|
||||
"loss": 0.8052,
|
||||
"step": 820
|
||||
},
|
||||
{
|
||||
"epoch": 0.7467176473895808,
|
||||
"grad_norm": 1.233946813798178,
|
||||
"learning_rate": 7.991863529880936e-07,
|
||||
"loss": 0.7951,
|
||||
"step": 830
|
||||
},
|
||||
{
|
||||
"epoch": 0.755714245550901,
|
||||
"grad_norm": 1.2668438696715465,
|
||||
"learning_rate": 7.464702813791308e-07,
|
||||
"loss": 0.8461,
|
||||
"step": 840
|
||||
},
|
||||
{
|
||||
"epoch": 0.7647108437122213,
|
||||
"grad_norm": 1.2263597703226994,
|
||||
"learning_rate": 6.952461474238182e-07,
|
||||
"loss": 0.8045,
|
||||
"step": 850
|
||||
},
|
||||
{
|
||||
"epoch": 0.7737074418735416,
|
||||
"grad_norm": 1.2325614523967854,
|
||||
"learning_rate": 6.455575336251549e-07,
|
||||
"loss": 0.8562,
|
||||
"step": 860
|
||||
},
|
||||
{
|
||||
"epoch": 0.7827040400348618,
|
||||
"grad_norm": 1.243498384623771,
|
||||
"learning_rate": 5.974467160353226e-07,
|
||||
"loss": 0.8345,
|
||||
"step": 870
|
||||
},
|
||||
{
|
||||
"epoch": 0.7917006381961821,
|
||||
"grad_norm": 1.158311860753632,
|
||||
"learning_rate": 5.509546282863806e-07,
|
||||
"loss": 0.8344,
|
||||
"step": 880
|
||||
},
|
||||
{
|
||||
"epoch": 0.8006972363575023,
|
||||
"grad_norm": 1.2163583553306607,
|
||||
"learning_rate": 5.061208267631315e-07,
|
||||
"loss": 0.8256,
|
||||
"step": 890
|
||||
},
|
||||
{
|
||||
"epoch": 0.8096938345188226,
|
||||
"grad_norm": 1.2102463242463388,
|
||||
"learning_rate": 4.629834569477626e-07,
|
||||
"loss": 0.8427,
|
||||
"step": 900
|
||||
},
|
||||
{
|
||||
"epoch": 0.8186904326801429,
|
||||
"grad_norm": 1.1969681545587425,
|
||||
"learning_rate": 4.21579220964923e-07,
|
||||
"loss": 0.8195,
|
||||
"step": 910
|
||||
},
|
||||
{
|
||||
"epoch": 0.8276870308414631,
|
||||
"grad_norm": 1.2573696493635405,
|
||||
"learning_rate": 3.8194334635482777e-07,
|
||||
"loss": 0.8277,
|
||||
"step": 920
|
||||
},
|
||||
{
|
||||
"epoch": 0.8366836290027834,
|
||||
"grad_norm": 1.2979421497920385,
|
||||
"learning_rate": 3.4410955610097745e-07,
|
||||
"loss": 0.811,
|
||||
"step": 930
|
||||
},
|
||||
{
|
||||
"epoch": 0.8456802271641036,
|
||||
"grad_norm": 1.1946033424208522,
|
||||
"learning_rate": 3.0811003993797327e-07,
|
||||
"loss": 0.8187,
|
||||
"step": 940
|
||||
},
|
||||
{
|
||||
"epoch": 0.8546768253254238,
|
||||
"grad_norm": 1.1959972915578303,
|
||||
"learning_rate": 2.7397542696386145e-07,
|
||||
"loss": 0.8352,
|
||||
"step": 950
|
||||
},
|
||||
{
|
||||
"epoch": 0.863673423486744,
|
||||
"grad_norm": 1.2612864041438945,
|
||||
"learning_rate": 2.4173475958028855e-07,
|
||||
"loss": 0.8076,
|
||||
"step": 960
|
||||
},
|
||||
{
|
||||
"epoch": 0.8726700216480643,
|
||||
"grad_norm": 1.2393073900072233,
|
||||
"learning_rate": 2.1141546878265696e-07,
|
||||
"loss": 0.8357,
|
||||
"step": 970
|
||||
},
|
||||
{
|
||||
"epoch": 0.8816666198093845,
|
||||
"grad_norm": 1.3961098525762,
|
||||
"learning_rate": 1.8304335082129032e-07,
|
||||
"loss": 0.8375,
|
||||
"step": 980
|
||||
},
|
||||
{
|
||||
"epoch": 0.8906632179707048,
|
||||
"grad_norm": 1.2304674879812914,
|
||||
"learning_rate": 1.566425452534784e-07,
|
||||
"loss": 0.8482,
|
||||
"step": 990
|
||||
},
|
||||
{
|
||||
"epoch": 0.8996598161320251,
|
||||
"grad_norm": 1.1275632452975384,
|
||||
"learning_rate": 1.3223551440506244e-07,
|
||||
"loss": 0.8334,
|
||||
"step": 1000
|
||||
},
|
||||
{
|
||||
"epoch": 0.9086564142933453,
|
||||
"grad_norm": 1.1697390212239938,
|
||||
"learning_rate": 1.0984302425904869e-07,
|
||||
"loss": 0.8218,
|
||||
"step": 1010
|
||||
},
|
||||
{
|
||||
"epoch": 0.9176530124546656,
|
||||
"grad_norm": 1.1549729040350385,
|
||||
"learning_rate": 8.94841267874974e-08,
|
||||
"loss": 0.8242,
|
||||
"step": 1020
|
||||
},
|
||||
{
|
||||
"epoch": 0.9266496106159858,
|
||||
"grad_norm": 1.2550852449415948,
|
||||
"learning_rate": 7.117614374173353e-08,
|
||||
"loss": 0.8276,
|
||||
"step": 1030
|
||||
},
|
||||
{
|
||||
"epoch": 0.9356462087773061,
|
||||
"grad_norm": 1.1789618223129013,
|
||||
"learning_rate": 5.493465191465458e-08,
|
||||
"loss": 0.8399,
|
||||
"step": 1040
|
||||
},
|
||||
{
|
||||
"epoch": 0.9446428069386263,
|
||||
"grad_norm": 1.308555979964644,
|
||||
"learning_rate": 4.0773469887692154e-08,
|
||||
"loss": 0.8231,
|
||||
"step": 1050
|
||||
},
|
||||
{
|
||||
"epoch": 0.9536394050999466,
|
||||
"grad_norm": 1.1941396262140982,
|
||||
"learning_rate": 2.8704646273687298e-08,
|
||||
"loss": 0.8089,
|
||||
"step": 1060
|
||||
},
|
||||
{
|
||||
"epoch": 0.9626360032612669,
|
||||
"grad_norm": 1.2336678582691885,
|
||||
"learning_rate": 1.873844946569614e-08,
|
||||
"loss": 0.8546,
|
||||
"step": 1070
|
||||
},
|
||||
{
|
||||
"epoch": 0.9716326014225871,
|
||||
"grad_norm": 1.2123027368688872,
|
||||
"learning_rate": 1.0883358900435626e-08,
|
||||
"loss": 0.8213,
|
||||
"step": 1080
|
||||
},
|
||||
{
|
||||
"epoch": 0.9806291995839074,
|
||||
"grad_norm": 1.101205104982958,
|
||||
"learning_rate": 5.146057843814223e-09,
|
||||
"loss": 0.8261,
|
||||
"step": 1090
|
||||
},
|
||||
{
|
||||
"epoch": 0.9896257977452276,
|
||||
"grad_norm": 1.138150004760349,
|
||||
"learning_rate": 1.531427704675459e-09,
|
||||
"loss": 0.8431,
|
||||
"step": 1100
|
||||
},
|
||||
{
|
||||
"epoch": 0.9986223959065479,
|
||||
"grad_norm": 1.1694006998873614,
|
||||
"learning_rate": 4.25438816009649e-11,
|
||||
"loss": 0.8082,
|
||||
"step": 1110
|
||||
}
|
||||
],
|
||||
"logging_steps": 10,
|
||||
"max_steps": 1111,
|
||||
"num_input_tokens_seen": 0,
|
||||
"num_train_epochs": 1,
|
||||
"save_steps": 600,
|
||||
"stateful_callbacks": {
|
||||
"TrainerControl": {
|
||||
"args": {
|
||||
"should_epoch_stop": false,
|
||||
"should_evaluate": false,
|
||||
"should_log": false,
|
||||
"should_save": true,
|
||||
"should_training_stop": true
|
||||
},
|
||||
"attributes": {}
|
||||
}
|
||||
},
|
||||
"total_flos": 145966987345920.0,
|
||||
"train_batch_size": 2,
|
||||
"trial_name": null,
|
||||
"trial_params": null
|
||||
}
|
||||
3
training_args.bin
Normal file
3
training_args.bin
Normal file
@@ -0,0 +1,3 @@
|
||||
version https://git-lfs.github.com/spec/v1
|
||||
oid sha256:9bb25bd9bed986bab5e24d067e0cc007f63f1029d8b2d36e7cd337b9ded77ec3
|
||||
size 7736
|
||||
27
training_config.yaml
Normal file
27
training_config.yaml
Normal file
@@ -0,0 +1,27 @@
|
||||
bf16: true
|
||||
cutoff_len: 4096
|
||||
dataset: tulu3_mixture_coding
|
||||
ddp_timeout: 180000000
|
||||
deepspeed: examples/deepspeed/ds_z3_config.json
|
||||
do_train: true
|
||||
eval_strategy: 'no'
|
||||
finetuning_type: full
|
||||
gradient_accumulation_steps: 32
|
||||
learning_rate: 5.0e-06
|
||||
logging_steps: 10
|
||||
lr_scheduler_type: cosine
|
||||
model_name_or_path: meta-llama/Llama-3.1-8B
|
||||
num_train_epochs: 1.0
|
||||
output_dir: /scratch/pxm5426/runs/lora-exploration/llama-factory/Llama-3.1-8B_tulu3_mixture_coding_full_ebs128_lr5e-06
|
||||
overwrite_cache: true
|
||||
overwrite_output_dir: true
|
||||
per_device_train_batch_size: 2
|
||||
plot_loss: true
|
||||
preprocessing_num_workers: 16
|
||||
report_to: wandb
|
||||
save_steps: 600
|
||||
save_strategy: steps
|
||||
stage: sft
|
||||
template: tulu_v3
|
||||
trust_remote_code: true
|
||||
warmup_ratio: 0.03
|
||||
BIN
training_loss.png
Normal file
BIN
training_loss.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 48 KiB |
760
zero_to_fp32.py
Normal file
760
zero_to_fp32.py
Normal file
@@ -0,0 +1,760 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Copyright (c) Microsoft Corporation.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# DeepSpeed Team
|
||||
|
||||
# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets
|
||||
# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
|
||||
# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
|
||||
# application.
|
||||
#
|
||||
# example:
|
||||
# python zero_to_fp32.py . output_dir/
|
||||
# or
|
||||
# python zero_to_fp32.py . output_dir/ --safe_serialization
|
||||
|
||||
import argparse
|
||||
import torch
|
||||
import glob
|
||||
import math
|
||||
import os
|
||||
import re
|
||||
import gc
|
||||
import json
|
||||
import numpy as np
|
||||
from tqdm import tqdm
|
||||
from collections import OrderedDict
|
||||
from dataclasses import dataclass
|
||||
|
||||
# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
|
||||
# DeepSpeed data structures it has to be available in the current python environment.
|
||||
from deepspeed.utils import logger
|
||||
from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
|
||||
FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
|
||||
FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
|
||||
|
||||
|
||||
@dataclass
class zero_model_state:
    """Per-rank model state extracted from a DeepSpeed ``*_model_states.pt`` file.

    Carries everything (besides the flat fp32 groups held in the optimizer
    files) needed to reconstruct a consolidated fp32 state_dict.

    Note: the original annotated these fields with ``dict()`` — a dict
    *instance*, not a type. Annotations are now proper types; runtime
    behavior of the dataclass is unchanged.
    """
    buffers: dict                  # buffer name -> fp32 tensor
    param_shapes: dict             # parameter name -> shape (per param group in zero3 — TODO confirm layout)
    shared_params: list            # [alias_name, source_name] pairs
    ds_version: int                # deepspeed version recorded in the checkpoint
    frozen_param_shapes: dict      # may be None when no params are frozen
    frozen_param_fragments: dict   # may be None when no params are frozen
|
||||
|
||||
|
||||
# Verbosity flag (0 = quiet); gates the diagnostic prints sprinkled through
# the reconstruction helpers below.
debug = 0

# load to cpu — reconstruction is done entirely on the host; checkpoints may
# exceed GPU memory, and no GPU is required for the conversion.
device = torch.device('cpu')
|
||||
|
||||
|
||||
def atoi(text):
    """Return *text* as an int when it is all digits, otherwise unchanged."""
    if text.isdigit():
        return int(text)
    return text
|
||||
|
||||
|
||||
def natural_keys(text):
    """Sort key for human ("natural") ordering: digit runs compare numerically.

    E.g. ``sorted(names, key=natural_keys)`` places 'rank2' before 'rank10'.
    (Ned Batchelder's human-sorting recipe:
    http://nedbatchelder.com/blog/200712/human_sorting.html)
    """
    pieces = re.split(r'(\d+)', text)
    return [int(piece) if piece.isdigit() else piece for piece in pieces]
|
||||
|
||||
|
||||
def get_model_state_file(checkpoint_dir, zero_stage):
    """Return the path of the (single) rank-0 model-states file for this stage.

    Args:
        checkpoint_dir: directory holding the per-rank checkpoint files.
        zero_stage: DeepSpeed ZeRO stage recorded in the optimizer state (1-3).

    Returns:
        The full path to the model-states file.

    Raises:
        FileNotFoundError: if the directory or the expected file is missing.
        ValueError: if ``zero_stage`` is not 1, 2 or 3.
    """
    if not os.path.isdir(checkpoint_dir):
        raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")

    # there should be only one file
    if zero_stage <= 2:
        file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
    elif zero_stage == 3:
        file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
    else:
        # previously fell through with `file` unbound -> UnboundLocalError
        raise ValueError(f"unknown zero stage {zero_stage}")

    if not os.path.exists(file):
        raise FileNotFoundError(f"can't find model states file at '{file}'")

    return file
|
||||
|
||||
|
||||
def get_checkpoint_files(checkpoint_dir, glob_pattern):
    """Return files in *checkpoint_dir* matching *glob_pattern*, naturally sorted.

    Natural (rank-numeric) ordering keeps e.g. rank_2 before rank_10.

    Raises:
        FileNotFoundError: when no file matches the pattern.
    """
    # XXX: need to test that this simple glob rule works for multi-node setup too
    pattern = os.path.join(checkpoint_dir, glob_pattern)
    matches = glob.glob(pattern)
    if not matches:
        raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
    matches.sort(key=natural_keys)
    return matches
|
||||
|
||||
|
||||
def get_optim_files(checkpoint_dir):
    """Return the naturally-sorted per-rank '*_optim_states.pt' files in *checkpoint_dir*."""
    return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
|
||||
|
||||
|
||||
def get_model_state_files(checkpoint_dir):
    """Return the naturally-sorted per-rank '*_model_states.pt' files in *checkpoint_dir*."""
    return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
|
||||
|
||||
|
||||
def parse_model_states(files):
    """Load each '*_model_states.pt' file and extract what reconstruction needs.

    Args:
        files: paths to the per-rank model-states checkpoint files.

    Returns:
        A list with one ``zero_model_state`` per file, holding fp32 buffers,
        parameter shapes, shared-parameter aliases, frozen-parameter data and
        the deepspeed version string.

    Raises:
        ValueError: if a file lacks the buffer-names key and therefore is not
            a model-state checkpoint.
    """
    zero_model_states = []
    for file in files:
        # weights_only=False: DeepSpeed pickles its own data structures into
        # the checkpoint, which torch's safe loader would reject.
        state_dict = torch.load(file, map_location=device, weights_only=False)

        if BUFFER_NAMES not in state_dict:
            raise ValueError(f"{file} is not a model state checkpoint")
        buffer_names = state_dict[BUFFER_NAMES]
        if debug:
            print("Found buffers:", buffer_names)

        # recover just the buffers while restoring them to fp32 if they were saved in fp16
        buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
        param_shapes = state_dict[PARAM_SHAPES]

        # collect parameters that are included in param_shapes
        # NOTE(review): param_names is built up below but never used afterwards
        param_names = []
        for s in param_shapes:
            for name in s.keys():
                param_names.append(name)

        # update with frozen parameters
        frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
        if frozen_param_shapes is not None:
            if debug:
                print(f"Found frozen_param_shapes: {frozen_param_shapes}")
            param_names += list(frozen_param_shapes.keys())

        # handle shared params
        shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]

        # may be absent in older checkpoints; .get keeps this tolerant
        ds_version = state_dict.get(DS_VERSION, None)

        frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)

        z_model_state = zero_model_state(buffers=buffers,
                                         param_shapes=param_shapes,
                                         shared_params=shared_params,
                                         ds_version=ds_version,
                                         frozen_param_shapes=frozen_param_shapes,
                                         frozen_param_fragments=frozen_param_fragments)
        zero_model_states.append(z_model_state)

    return zero_model_states
|
||||
|
||||
|
||||
def parse_optim_states(files, ds_checkpoint_dir):
    """Load the per-rank '*_optim_states.pt' files and extract the fp32 flat groups.

    Args:
        files: naturally-sorted list of optimizer-state checkpoint paths.
        ds_checkpoint_dir: checkpoint directory (used only for error messages).

    Returns:
        Tuple ``(zero_stage, world_size, fp32_flat_groups)`` where
        ``fp32_flat_groups`` has one entry per rank.

    Raises:
        ValueError: if the files are not a zero checkpoint, the number of
            files does not match the recorded world size, or the recorded
            stage is unknown.
    """
    total_files = len(files)
    state_dicts = []
    for f in tqdm(files, desc='Loading checkpoint shards'):
        # mmap avoids pulling each whole shard into RAM; weights_only=False
        # because DeepSpeed pickles its own data structures.
        state_dict = torch.load(f, map_location=device, mmap=True, weights_only=False)
        # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
        # and also handle the case where it was already removed by another helper script
        state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
        state_dicts.append(state_dict)

    # (was `if not ZERO_STAGE in ...` — same behavior, idiomatic form)
    if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
        raise ValueError(f"{files[0]} is not a zero checkpoint")
    zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
    world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]

    # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
    # parameters can be different from data parallelism for non-expert parameters. So we can just
    # use the max of the partition_count to get the dp world_size.
    if isinstance(world_size, list):
        world_size = max(world_size)

    if world_size != total_files:
        raise ValueError(
            f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
            "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
        )

    # the groups are named differently in each stage
    if zero_stage <= 2:
        fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
    elif zero_stage == 3:
        fp32_groups_key = FP32_FLAT_GROUPS
    else:
        raise ValueError(f"unknown zero stage {zero_stage}")

    fp32_flat_groups = [sd[OPTIMIZER_STATE_DICT][fp32_groups_key] for sd in state_dicts]
    return zero_stage, world_size, fp32_flat_groups
|
||||
|
||||
|
||||
def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
    """
    Returns fp32 state_dict reconstructed from ds checkpoint

    Args:
        - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
        - ``exclude_frozen_parameters``: when True, frozen (non-trained) parameters are left out

    """
    print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")

    optim_files = get_optim_files(ds_checkpoint_dir)
    zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
    print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")

    model_files = get_model_state_files(ds_checkpoint_dir)

    zero_model_states = parse_model_states(model_files)
    print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')

    # dispatch on the stage recorded in the optimizer files; parse_optim_states
    # has already rejected any stage other than 1, 2 or 3
    if zero_stage <= 2:
        return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                                          exclude_frozen_parameters)
    elif zero_stage == 3:
        return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                                          exclude_frozen_parameters)
|
||||
|
||||
|
||||
def _zero2_merge_frozen_params(state_dict, zero_model_states):
    """Copy frozen (non-trained) parameters into *state_dict* for a zero1/2 checkpoint.

    Rank 0's frozen fragments are assigned directly as the full tensors
    (contrast ``_zero3_merge_frozen_params``, which concatenates fragments
    across ranks). No-op when the checkpoint recorded no frozen parameters.
    """
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    frozen_param_fragments = zero_model_states[0].frozen_param_fragments

    if debug:
        num_elem = sum(s.numel() for s in frozen_param_shapes.values())
        print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        # the rank-0 fragment is used as the complete tensor here
        state_dict[name] = frozen_param_fragments[name]

        if debug:
            print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
|
||||
|
||||
|
||||
def _has_callable(obj, fn):
|
||||
attr = getattr(obj, fn, None)
|
||||
return callable(attr)
|
||||
|
||||
|
||||
def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    """Reconstruct full fp32 trainable parameters for a zero1/2 checkpoint.

    Each rank stores a contiguous slice of every param group's flat fp32
    buffer; concatenating the per-rank slices (per group) yields the full flat
    vector from which each named parameter is carved out by offset. Results
    are written into *state_dict* in place.
    """
    param_shapes = zero_model_states[0].param_shapes

    # Reconstruction protocol:
    #
    # XXX: document this

    if debug:
        for i in range(world_size):
            for j in range(len(fp32_flat_groups[0])):
                print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")

    # XXX: memory usage doubles here (zero2)
    num_param_groups = len(fp32_flat_groups[0])
    merged_single_partition_of_fp32_groups = []
    for i in range(num_param_groups):
        merged_partitions = [sd[i] for sd in fp32_flat_groups]
        full_single_fp32_vector = torch.cat(merged_partitions, 0)
        merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
    avail_numel = sum(
        [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])

    if debug:
        wanted_params = sum([len(shapes) for shapes in param_shapes])
        wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
        # not asserting if there is a mismatch due to possible padding
        print(f"Have {avail_numel} numels to process.")
        print(f"Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    total_numel = 0
    total_params = 0
    for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
        offset = 0
        avail_numel = full_single_fp32_vector.numel()
        for name, shape in shapes.items():

            # shape may be a tensor-like (with .numel()) or a plain tuple
            unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
            total_numel += unpartitioned_numel
            total_params += 1

            if debug:
                print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
            # slice this parameter out of the merged flat vector, no copy
            state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
            offset += unpartitioned_numel

        # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
        # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
        # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
        # live optimizer object, so we are checking that the numbers are within the right range
        align_to = 2 * world_size

        def zero2_align(x):
            return align_to * math.ceil(x / align_to)

        if debug:
            print(f"original offset={offset}, avail_numel={avail_numel}")

        offset = zero2_align(offset)
        avail_numel = zero2_align(avail_numel)

        if debug:
            print(f"aligned offset={offset}, avail_numel={avail_numel}")

        # Sanity check
        if offset != avail_numel:
            raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
|
||||
|
||||
|
||||
def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    """Assemble the final fp32 state_dict for a zero1/2 checkpoint.

    Order: buffers first, then (unless excluded) frozen params, then trainable
    params; finally shared-parameter aliases are pointed at their source
    tensors.
    """
    state_dict = OrderedDict()

    # buffers
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero2_merge_frozen_params(state_dict, zero_model_states)

    _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters
    for pair in zero_model_states[0].shared_params:
        if pair[1] in state_dict:
            state_dict[pair[0]] = state_dict[pair[1]]

    return state_dict
|
||||
|
||||
|
||||
def zero3_partitioned_param_info(unpartitioned_numel, world_size):
    """Return (per-rank partition size, trailing padding) for a zero3 parameter.

    Each of the *world_size* ranks stores ``ceil(numel / world_size)`` elements;
    the final partition is zero-padded by the returned padding amount.
    """
    leftover = unpartitioned_numel % world_size
    if leftover:
        padding = world_size - leftover
    else:
        padding = 0
    per_rank_numel = math.ceil(unpartitioned_numel / world_size)
    return per_rank_numel, padding
|
||||
|
||||
|
||||
def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
    """Reconstruct frozen (non-trained) parameters for a zero3 checkpoint.

    zero3 partitions frozen params across ranks, so each tensor is rebuilt by
    concatenating every rank's fragment and trimming the alignment padding.
    No-op when the checkpoint recorded no frozen parameters.
    """
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    if debug:
        for i in range(world_size):
            num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
            print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    # every rank holds an equally-sized fragment, hence the * world_size
    avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in zero_model_states[0].frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        # concatenate all ranks' fragments, drop padding, restore original shape
        param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
        state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
|
||||
|
||||
|
||||
class GatheredTensor:
    """
    Lazy stand-in for a full parameter tensor.

    Holds references to the per-rank flat partitions and only materializes
    the merged weight when ``contiguous()`` is called, which keeps peak
    memory low when there are multiple parameter groups.
    """

    def __init__(self, flat_groups, flat_groups_offset, offset, partitioned_numel, shape):
        # flat_groups: per-rank lists of flattened fp32 group tensors
        self.flat_groups = flat_groups
        # cumulative element offsets marking each group's start within a rank
        self.flat_groups_offset = flat_groups_offset
        # this param's start position inside a rank's concatenated groups
        self.offset = offset
        # number of elements each rank holds for this param (incl. padding)
        self.partitioned_numel = partitioned_numel
        # final (unpartitioned) shape of the parameter
        self.shape = shape
        self.dtype = self.flat_groups[0][0].dtype

    def contiguous(self):
        """
        Materialize the parameter: gather this param's slice from every
        rank's flat group(s), concatenate, trim padding and reshape.
        """
        slice_end = self.offset + self.partitioned_numel
        boundaries = self.flat_groups_offset

        # Locate the group(s) whose element range covers [offset, slice_end).
        # The boundaries are identical on every rank, so search once.
        first_group = None
        last_group = None
        for gid in range(len(boundaries)):
            if boundaries[gid] <= self.offset < boundaries[gid + 1]:
                first_group = gid
            if boundaries[gid] < slice_end <= boundaries[gid + 1]:
                last_group = gid
                break

        chunks = []
        for rank_groups in self.flat_groups:
            # Pull this param's piece(s) out of each rank's flat tensor(s).
            for gid in range(first_group, last_group + 1):
                lo = self.offset - boundaries[gid]
                hi = min(slice_end, boundaries[gid + 1]) - boundaries[gid]
                chunks.append(rank_groups[gid][lo:hi])

        # Stitch the per-rank pieces together, drop alignment padding, reshape.
        merged = torch.cat(chunks, dim=0)
        return merged[:self.shape.numel()].view(self.shape).contiguous()
|
||||
|
||||
|
||||
def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    """
    Reconstruct full fp32 trainable parameters from ZeRO-3 flat partitions,
    inserting them into ``state_dict`` as memory-efficient ``GatheredTensor``
    placeholders (materialized later via ``.contiguous()``).

    Args:
        state_dict: dict updated in place with name -> GatheredTensor
        world_size: number of ranks the checkpoint was saved with
        fp32_flat_groups: per-rank lists of flattened fp32 partitions
        zero_model_states: per-rank model states; rank 0 provides the shapes

    Raises:
        ValueError: if the consumed element count does not match what the
            flat partitions provide (corrupt or mismatched checkpoint).
    """
    param_shapes = zero_model_states[0].param_shapes
    avail_numel = sum([flat_group.numel() for flat_group in fp32_flat_groups[0]]) * world_size

    # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
    # param, re-consolidating each param, while dealing with padding if any

    # merge list of dicts, preserving order
    param_shapes = {k: v for d in param_shapes for k, v in d.items()}

    if debug:
        # NOTE(review): fp32_flat_groups[i] is a list of tensors here, so the
        # .shape / .numel() calls in this debug block look stale — verify
        # before relying on debug output.
        for i in range(world_size):
            print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")

        wanted_params = len(param_shapes)
        wanted_numel = sum(shape.numel() for shape in param_shapes.values())
        # not asserting if there is a mismatch due to possible padding
        avail_numel = fp32_flat_groups[0].numel() * world_size
        print(f"Trainable params: Have {avail_numel} numels to process.")
        print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    offset = 0
    total_numel = 0
    total_params = 0
    # cumulative start offsets of each flat group within a single rank
    flat_groups_offset = [0] + list(np.cumsum([flat_tensor.numel() for flat_tensor in fp32_flat_groups[0]]))
    for name, shape in tqdm(param_shapes.items(), desc='Gathering sharded weights'):
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel
        total_params += 1
        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

        # memory efficient tensor: defer the actual gather/concat to .contiguous()
        tensor = GatheredTensor(fp32_flat_groups, flat_groups_offset, offset, partitioned_numel, shape)
        state_dict[name] = tensor
        offset += partitioned_numel

    # each rank consumed `offset` elements, so the grand total is offset * world_size
    offset *= world_size

    # Sanity check
    if offset != avail_numel:
        raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
|
||||
|
||||
|
||||
def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    """
    Assemble a full fp32 state_dict from ZeRO-3 shards: model buffers first,
    then (optionally) frozen params, then trainable params, and finally
    re-link shared/tied parameters.
    """
    state_dict = OrderedDict()

    # Start with the buffers saved on rank 0.
    rank0_state = zero_model_states[0]
    state_dict.update(rank0_state.buffers)
    if debug:
        print(f"added {len(rank0_state.buffers)} buffers")

    # Frozen parameters are merged unless the caller asked to skip them.
    if not exclude_frozen_parameters:
        _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)

    _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters: alias each tied name to its source tensor
    for pair in rank0_state.shared_params:
        if pair[1] in state_dict:
            state_dict[pair[0]] = state_dict[pair[1]]

    return state_dict
|
||||
|
||||
|
||||
def to_torch_tensor(state_dict, return_empty_tensor=False):
    """
    Materialize a state_dict of GatheredTensor (or tensor-like) entries into
    plain torch tensors.

    Entries that are the same underlying object (tied/shared weights) are
    converted once and the converted tensor is reused, preserving sharing.

    Args:
        state_dict: mapping of parameter name -> GatheredTensor / tensor
        return_empty_tensor: when True, allocate uninitialized tensors of the
            right shape/dtype instead of gathering real data (cheap, useful
            for planning shards without touching weights)

    Returns:
        dict mapping parameter name -> torch tensor
    """
    result = {}
    # id(source object) -> name it was first converted under
    seen = {}
    for name, src in state_dict.items():
        key = id(src)
        if key in seen:
            # tied weight: reuse the already-converted tensor
            result[name] = result[seen[key]]
            continue
        seen[key] = name
        if return_empty_tensor:
            result[name] = torch.empty(src.shape, dtype=src.dtype)
        else:
            result[name] = src.contiguous()
    return result
|
||||
|
||||
|
||||
def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
                                             tag=None,
                                             exclude_frozen_parameters=False,
                                             lazy_mode=False):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
    ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
    via a model hub.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters
        - ``lazy_mode``: get state_dict in lazy mode. It returns a dict of pseudo tensor instead of torch tensor, which is more memory efficient.
          Convert the pseudo tensor to torch tensor by ``.contiguous()``

    Returns:
        - pytorch ``state_dict``

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
        # do the training and checkpoint saving
        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
        model = model.cpu() # move to cpu
        model.load_state_dict(state_dict)
        # submit to model hub or save the model to share with others

    In this example the ``model`` will no longer be usable in the deepspeed context of the same
    application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.

    Note: the above usage may not work if your application doesn't have sufficient free CPU memory.
    You may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
    the checkpoint. Or you can load state_dict in lazy mode ::

        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, lazy_mode=True) # not on cpu
        for name, lazy_tensor in state_dict.items():
            tensor = lazy_tensor.contiguous() # to cpu
            print(name, tensor)
            # del tensor to release memory if it is no longer in use
    """
    # Resolve the tag from the 'latest' file when not given explicitly.
    if tag is None:
        latest_path = os.path.join(checkpoint_dir, 'latest')
        if os.path.isfile(latest_path):
            with open(latest_path, 'r') as fd:
                tag = fd.read().strip()
        else:
            raise ValueError(f"Unable to find 'latest' file at {latest_path}")

    ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)

    if not os.path.isdir(ds_checkpoint_dir):
        raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")

    state_dict = _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
    if lazy_mode:
        # caller materializes each tensor on demand via .contiguous()
        return state_dict
    else:
        return to_torch_tensor(state_dict)
|
||||
|
||||
|
||||
def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir,
                                               output_dir,
                                               max_shard_size="5GB",
                                               safe_serialization=False,
                                               tag=None,
                                               exclude_frozen_parameters=False):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
    loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``output_dir``: directory to the pytorch fp32 state_dict output files
        - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB
        - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters

    Raises:
        ImportError: if optional dependencies (`safetensors`, `huggingface_hub`)
            required by the chosen options are not installed.
    """

    # Dependency pre-check: fail fast with a helpful hint before any heavy work.
    if safe_serialization:
        try:
            from safetensors.torch import save_file
        except ImportError:
            print('If you want to use `safe_serialization`, please `pip install safetensors`')
            raise
    if max_shard_size is not None:
        try:
            from huggingface_hub import split_torch_state_dict_into_shards
        except ImportError:
            print('If you want to use `max_shard_size`, please `pip install huggingface_hub`')
            raise

    # Convert zero checkpoint to state_dict.
    # lazy_mode keeps weights as GatheredTensor placeholders until each shard is written.
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
                                                          tag,
                                                          exclude_frozen_parameters,
                                                          lazy_mode=True)

    # Shard the model if it is too big.
    weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin"
    if max_shard_size is not None:
        filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
        # a memory-efficient approach for sharding: plan shards against
        # empty (uninitialized) tensors so no real weights are gathered yet
        empty_state_dict = to_torch_tensor(state_dict, return_empty_tensor=True)
        state_dict_split = split_torch_state_dict_into_shards(empty_state_dict,
                                                              filename_pattern=filename_pattern,
                                                              max_shard_size=max_shard_size)
    else:
        from collections import namedtuple
        StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"])
        state_dict_split = StateDictSplit(is_sharded=False,
                                          filename_to_tensors={weights_name: list(state_dict.keys())})

    # Save the model by shard
    os.makedirs(output_dir, exist_ok=True)
    filename_to_tensors = state_dict_split.filename_to_tensors.items()
    for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"):
        # materialize only this shard's tensors
        shard_state_dict = {tensor_name: state_dict[tensor_name] for tensor_name in tensors}
        shard_state_dict = to_torch_tensor(shard_state_dict)
        output_path = os.path.join(output_dir, shard_file)
        if safe_serialization:
            save_file(shard_state_dict, output_path, metadata={"format": "pt"})
        else:
            torch.save(shard_state_dict, output_path)
        # release the memory of current shard
        for tensor_name in list(shard_state_dict.keys()):
            del state_dict[tensor_name]
            del shard_state_dict[tensor_name]
        del shard_state_dict
        gc.collect()

    # Save index if sharded
    if state_dict_split.is_sharded:
        index = {
            "metadata": state_dict_split.metadata,
            "weight_map": state_dict_split.tensor_to_filename,
        }
        save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json"
        save_index_file = os.path.join(output_dir, save_index_file)
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)
|
||||
|
||||
|
||||
def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
    """
    1. Put the provided model to cpu
    2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
    3. Load it into the provided model

    Args:
        - ``model``: the model object to update
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``

    Returns:
        - ``model``: modified model

    Make sure you have plenty of CPU memory available before you call this function. If you don't
    have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
    conveniently placed for you in the checkpoint folder.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
        model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
        # submit to model hub or save the model to share with others

    Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context
    of the same application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    """
    logger.info(f"Extracting fp32 weights")
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)

    logger.info(f"Overwriting model with fp32 weights")
    model = model.cpu()
    # strict=False: the checkpoint may legitimately lack some entries
    # (e.g. excluded frozen parameters), so missing keys are tolerated
    model.load_state_dict(state_dict, strict=False)

    return model
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Command-line entry point: convert a ZeRO checkpoint folder into
    # consolidated fp32 weight file(s) under output_dir.
    parser = argparse.ArgumentParser()
    parser.add_argument("checkpoint_dir",
                        type=str,
                        help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
    parser.add_argument("output_dir",
                        type=str,
                        help="directory to the pytorch fp32 state_dict output files "
                        "(e.g. path/checkpoint-12-output/)")
    parser.add_argument(
        "--max_shard_size",
        type=str,
        default="5GB",
        help="The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size "
        "lower than this size. If expressed as a string, needs to be digits followed by a unit (like `5MB`). "
        "We default it to 5GB in order for models to be able to run easily on free-tier google colab instances "
        "without CPU OOM issues.")
    parser.add_argument(
        "--safe_serialization",
        default=False,
        action='store_true',
        help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).")
    parser.add_argument("-t",
                        "--tag",
                        type=str,
                        default=None,
                        help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
    parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
    parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
    args = parser.parse_args()

    # module-level flag consumed by the merge helpers above
    debug = args.debug

    convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
                                               args.output_dir,
                                               max_shard_size=args.max_shard_size,
                                               safe_serialization=args.safe_serialization,
                                               tag=args.tag,
                                               exclude_frozen_parameters=args.exclude_frozen_parameters)
|
||||
Reference in New Issue
Block a user