Initialize project; model provided by the ModelHub XC community
Model: introtollm/qwen2.5-3B-cb-1_0 · Source: Original Platform
.gitattributes (vendored, new file, 36 lines)
@@ -0,0 +1,36 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md (new file, 58 lines)
@@ -0,0 +1,58 @@
---
library_name: transformers
license: other
base_model: Qwen/Qwen2.5-3B
tags:
- llama-factory
- full
- generated_from_trainer
model-index:
- name: qwen2.5-3B-cb-1_0
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# qwen2.5-3B-cb-1_0

This model is a fine-tuned version of [Qwen/Qwen2.5-3B](https://huggingface.co/Qwen/Qwen2.5-3B) on the cb_1_0_50000 dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 1
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 8
- total_train_batch_size: 8
- optimizer: ADAMW_TORCH_FUSED with betas=(0.9, 0.95), epsilon=1e-08, and no additional optimizer arguments
- lr_scheduler_type: cosine
- lr_scheduler_warmup_steps: 42
- training_steps: 2109

### Training results

### Framework versions

- Transformers 4.57.1
- Pytorch 2.10.0+cu128
- Datasets 4.0.0
- Tokenizers 0.22.2
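Since the card declares `library_name: transformers`, the checkpoint can be loaded with the standard AutoModel/AutoTokenizer pair. A minimal usage sketch (not part of the original card; the local path is a hypothetical clone of this repository):

# Load the fine-tuned checkpoint and run one chat-formatted generation.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_path = "./qwen2.5-3B-cb-1_0"  # hypothetical local clone of this repo
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(model_path, device_map="auto")

messages = [{"role": "user", "content": "Give me a short introduction to large language models."}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

outputs = model.generate(inputs, max_new_tokens=256)
print(tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True))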
added_tokens.json (new file, 24 lines)
@@ -0,0 +1,24 @@
{
  "</tool_call>": 151658,
  "<tool_call>": 151657,
  "<|box_end|>": 151649,
  "<|box_start|>": 151648,
  "<|endoftext|>": 151643,
  "<|file_sep|>": 151664,
  "<|fim_middle|>": 151660,
  "<|fim_pad|>": 151662,
  "<|fim_prefix|>": 151659,
  "<|fim_suffix|>": 151661,
  "<|im_end|>": 151645,
  "<|im_start|>": 151644,
  "<|image_pad|>": 151655,
  "<|object_ref_end|>": 151647,
  "<|object_ref_start|>": 151646,
  "<|quad_end|>": 151651,
  "<|quad_start|>": 151650,
  "<|repo_name|>": 151663,
  "<|video_pad|>": 151656,
  "<|vision_end|>": 151653,
  "<|vision_pad|>": 151654,
  "<|vision_start|>": 151652
}
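These entries map the ChatML and tool-calling control tokens onto the ids at the top of the vocabulary (151643-151664). A quick round-trip check, assuming the tokenizer from this repo is loaded from the same hypothetical local path as above:

# The ids recorded in added_tokens.json should match what the tokenizer reports.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./qwen2.5-3B-cb-1_0")  # hypothetical local path
assert tok.convert_tokens_to_ids("<|endoftext|>") == 151643
assert tok.convert_tokens_to_ids("<|im_end|>") == 151645
assert tok.convert_ids_to_tokens(151657) == "<tool_call>"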
all_results.json (new file, 8 lines)
@@ -0,0 +1,8 @@
{
  "epoch": 1.469850121993726,
  "total_flos": 5.7513841193385984e+17,
  "train_loss": 0.5876018210235169,
  "train_runtime": 8852.0342,
  "train_samples_per_second": 1.906,
  "train_steps_per_second": 0.238
}
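The throughput figures are consistent with the 2109 training steps and the effective batch size of 8 reported in the README; a quick arithmetic check (no additional data assumed):

runtime = 8852.0342            # train_runtime in seconds
print(0.238 * runtime)         # ~2107, close to training_steps: 2109
print(1.906 * runtime)         # ~16872 samples, i.e. ~2109 steps x total_train_batch_size 8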
chat_template.jinja (new file, 54 lines)
@@ -0,0 +1,54 @@
{%- if tools %}
    {{- '<|im_start|>system\n' }}
    {%- if messages[0]['role'] == 'system' %}
        {{- messages[0]['content'] }}
    {%- else %}
        {{- 'You are a helpful assistant.' }}
    {%- endif %}
    {{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
    {%- for tool in tools %}
        {{- "\n" }}
        {{- tool | tojson }}
    {%- endfor %}
    {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
{%- else %}
    {%- if messages[0]['role'] == 'system' %}
        {{- '<|im_start|>system\n' + messages[0]['content'] + '<|im_end|>\n' }}
    {%- else %}
        {{- '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}
    {%- endif %}
{%- endif %}
{%- for message in messages %}
    {%- if (message.role == "user") or (message.role == "system" and not loop.first) or (message.role == "assistant" and not message.tool_calls) %}
        {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
    {%- elif message.role == "assistant" %}
        {{- '<|im_start|>' + message.role }}
        {%- if message.content %}
            {{- '\n' + message.content }}
        {%- endif %}
        {%- for tool_call in message.tool_calls %}
            {%- if tool_call.function is defined %}
                {%- set tool_call = tool_call.function %}
            {%- endif %}
            {{- '\n<tool_call>\n{"name": "' }}
            {{- tool_call.name }}
            {{- '", "arguments": ' }}
            {{- tool_call.arguments | tojson }}
            {{- '}\n</tool_call>' }}
        {%- endfor %}
        {{- '<|im_end|>\n' }}
    {%- elif message.role == "tool" %}
        {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %}
            {{- '<|im_start|>user' }}
        {%- endif %}
        {{- '\n<tool_response>\n' }}
        {{- message.content }}
        {{- '\n</tool_response>' }}
        {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
            {{- '<|im_end|>\n' }}
        {%- endif %}
    {%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
    {{- '<|im_start|>assistant\n' }}
{%- endif %}
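This is the ChatML-with-tools template; `tokenizer.apply_chat_template` renders it into the prompt string the model was trained on. A sketch of how it is consumed (the tokenizer path is the same hypothetical local clone; the weather tool is a made-up example, not something shipped with the model):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./qwen2.5-3B-cb-1_0")  # hypothetical local path
tools = [{
    "type": "function",
    "function": {
        "name": "get_weather",  # illustrative tool, not part of this repo
        "description": "Look up the current weather for a city.",
        "parameters": {"type": "object", "properties": {"city": {"type": "string"}}},
    },
}]
messages = [{"role": "user", "content": "What's the weather in Paris?"}]
prompt = tok.apply_chat_template(messages, tools=tools, add_generation_prompt=True, tokenize=False)
print(prompt)  # <|im_start|>system ... <tools> ... <|im_start|>assistant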
config.json (new file, 67 lines)
@@ -0,0 +1,67 @@
{
  "architectures": [
    "Qwen2ForCausalLM"
  ],
  "attention_dropout": 0.0,
  "dtype": "float32",
  "eos_token_id": 151643,
  "hidden_act": "silu",
  "hidden_size": 2048,
  "initializer_range": 0.02,
  "intermediate_size": 11008,
  "layer_types": [
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention",
    "full_attention"
  ],
  "max_position_embeddings": 32768,
  "max_window_layers": 36,
  "model_type": "qwen2",
  "num_attention_heads": 16,
  "num_hidden_layers": 36,
  "num_key_value_heads": 2,
  "pad_token_id": 151643,
  "rms_norm_eps": 1e-06,
  "rope_scaling": null,
  "rope_theta": 1000000.0,
  "sliding_window": null,
  "tie_word_embeddings": true,
  "transformers_version": "4.57.1",
  "use_cache": false,
  "use_mrope": false,
  "use_sliding_window": false,
  "vocab_size": 151936
}
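The config describes a 36-layer Qwen2 decoder with grouped-query attention (16 query heads sharing 2 KV heads), full attention in every layer, and float32 storage, which matches the ~12.3 GB of safetensors shards below. A small inspection sketch (local path is hypothetical):

from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("./qwen2.5-3B-cb-1_0")
head_dim = cfg.hidden_size // cfg.num_attention_heads           # 2048 // 16 = 128
kv_groups = cfg.num_attention_heads // cfg.num_key_value_heads  # 16 // 2 = 8 query heads per KV head
print(cfg.num_hidden_layers, head_dim, kv_groups)               # 36 128 8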
generation_config.json (new file, 8 lines)
@@ -0,0 +1,8 @@
{
  "eos_token_id": [
    151643
  ],
  "max_new_tokens": 2048,
  "pad_token_id": 151643,
  "transformers_version": "4.57.1"
}
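These defaults (eos/pad token 151643, up to 2048 new tokens) are picked up automatically by `model.generate()`; they can also be read explicitly. A sketch, again assuming the hypothetical local path:

from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("./qwen2.5-3B-cb-1_0")
print(gen_cfg.eos_token_id, gen_cfg.pad_token_id, gen_cfg.max_new_tokens)  # [151643] 151643 2048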
merges.txt (new file, 151388 lines)
File diff suppressed because it is too large.
model-00001-of-00003.safetensors (new file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8b4bb38ca4760cfab0e3a387a557330cdebc7a1096df4942f2ed9e50baf94673
size 4982131536

model-00002-of-00003.safetensors (new file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3574b48d87245a5cacf337aa80b32ffd181d384b0a181d3c99940181bbcabeca
size 4932949336

model-00003-of-00003.safetensors (new file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bbc2c83ec4847ffc7f0169b35c7105976458656daefd83dd5a562eecd514c8d1
size 2428723160
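These are Git LFS pointer files, not the weights themselves; the actual shards are fetched by LFS (e.g. `git lfs pull`) and can be checked against the recorded size and sha256. A verification sketch for the first shard:

import hashlib, os

path = "model-00001-of-00003.safetensors"  # after the LFS objects have been pulled
expected_sha = "8b4bb38ca4760cfab0e3a387a557330cdebc7a1096df4942f2ed9e50baf94673"
assert os.path.getsize(path) == 4982131536
h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
assert h.hexdigest() == expected_sha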
model.safetensors.index.json (new file, 442 lines)
@@ -0,0 +1,442 @@
|
||||
{
|
||||
"metadata": {
|
||||
"total_parameters": 3085938688,
|
||||
"total_size": 12343754752
|
||||
},
|
||||
"weight_map": {
|
||||
"model.embed_tokens.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.0.input_layernorm.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.0.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.0.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.0.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.0.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.0.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
|
||||
"model.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.0.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.0.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
|
||||
"model.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.0.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
|
||||
"model.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.1.input_layernorm.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.1.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.1.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.1.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.1.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.1.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
|
||||
"model.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.1.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.1.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
|
||||
"model.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.1.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
|
||||
"model.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.10.input_layernorm.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.10.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.10.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.10.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.10.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.10.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
|
||||
"model.layers.10.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.10.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.10.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
|
||||
"model.layers.10.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.10.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
|
||||
"model.layers.10.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.11.input_layernorm.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.11.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.11.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.11.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.11.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.11.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
|
||||
"model.layers.11.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.11.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.11.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
|
||||
"model.layers.11.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.11.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
|
||||
"model.layers.11.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.12.input_layernorm.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.12.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.12.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.12.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.12.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.12.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
|
||||
"model.layers.12.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.12.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.12.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
|
||||
"model.layers.12.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.12.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
|
||||
"model.layers.12.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.13.input_layernorm.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.13.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.13.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.13.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.13.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.13.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.13.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.13.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.13.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.13.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.13.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.13.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.14.input_layernorm.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.14.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.14.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.14.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.14.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.14.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.14.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.14.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.14.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.14.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.14.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.14.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.15.input_layernorm.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.15.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.15.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.15.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.15.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.15.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.15.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.15.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.15.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.15.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.15.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.15.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.16.input_layernorm.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.16.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.16.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.16.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.16.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.16.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.16.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.16.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.16.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.16.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.16.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.16.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.17.input_layernorm.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.17.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.17.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.17.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.17.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.17.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.17.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.17.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.17.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.17.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.17.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.17.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.18.input_layernorm.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.18.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.18.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.18.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.18.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.18.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.18.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.18.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.18.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.19.input_layernorm.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.19.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.19.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.19.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.19.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.19.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.19.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.19.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.19.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.2.input_layernorm.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.2.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.2.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.2.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.2.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.2.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
|
||||
"model.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.2.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.2.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
|
||||
"model.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.2.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
|
||||
"model.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.20.input_layernorm.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.20.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.20.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.20.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.20.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.20.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.20.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.20.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.20.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.20.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.20.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.20.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.21.input_layernorm.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.21.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.21.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.21.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.21.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.21.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.21.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.21.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.21.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.21.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.21.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.21.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.22.input_layernorm.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.22.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.22.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.22.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.22.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.22.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.22.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.22.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.22.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.23.input_layernorm.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.23.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.23.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.23.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.23.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.23.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.23.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.23.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.23.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.23.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.23.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.23.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.24.input_layernorm.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.24.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.24.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.24.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.24.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.24.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.24.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.24.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.24.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.24.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.24.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.24.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.25.input_layernorm.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.25.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.25.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.25.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.25.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.25.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.25.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.25.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.25.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.25.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.25.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.25.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.26.input_layernorm.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.26.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.26.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.26.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.26.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.26.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.26.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.26.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.26.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.26.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.26.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.26.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.27.input_layernorm.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.27.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.27.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.27.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.27.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.27.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.27.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.27.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.27.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.27.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.27.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.27.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.28.input_layernorm.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.28.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.28.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.28.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.28.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.28.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.28.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.28.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.28.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.28.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.28.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
|
||||
"model.layers.28.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
|
||||
"model.layers.29.input_layernorm.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.29.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.29.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.29.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.29.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.29.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
|
||||
"model.layers.29.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.29.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.29.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
|
||||
"model.layers.29.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.29.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
|
||||
"model.layers.29.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.3.input_layernorm.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.3.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.3.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.3.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.3.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.3.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
|
||||
"model.layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.3.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.3.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
|
||||
"model.layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.3.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
|
||||
"model.layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.30.input_layernorm.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.30.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.30.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.30.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.30.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.30.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
|
||||
"model.layers.30.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.30.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.30.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
|
||||
"model.layers.30.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.30.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
|
||||
"model.layers.30.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.31.input_layernorm.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.31.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.31.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.31.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.31.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.31.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
|
||||
"model.layers.31.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.31.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.31.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
|
||||
"model.layers.31.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.31.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
|
||||
"model.layers.31.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.32.input_layernorm.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.32.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.32.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.32.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.32.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.32.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
|
||||
"model.layers.32.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.32.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.32.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
|
||||
"model.layers.32.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.32.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
|
||||
"model.layers.32.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.33.input_layernorm.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.33.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.33.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.33.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.33.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.33.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
|
||||
"model.layers.33.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.33.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.33.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
|
||||
"model.layers.33.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.33.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
|
||||
"model.layers.33.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.34.input_layernorm.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.34.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.34.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.34.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.34.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.34.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
|
||||
"model.layers.34.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.34.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.34.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
|
||||
"model.layers.34.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.34.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
|
||||
"model.layers.34.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.35.input_layernorm.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.35.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.35.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.35.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.35.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.35.self_attn.k_proj.bias": "model-00003-of-00003.safetensors",
|
||||
"model.layers.35.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.35.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.35.self_attn.q_proj.bias": "model-00003-of-00003.safetensors",
|
||||
"model.layers.35.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.35.self_attn.v_proj.bias": "model-00003-of-00003.safetensors",
|
||||
"model.layers.35.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
|
||||
"model.layers.4.input_layernorm.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.4.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.4.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.4.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.4.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.4.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
|
||||
"model.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.4.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.4.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
|
||||
"model.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.4.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
|
||||
"model.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.5.input_layernorm.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.5.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.5.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.5.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.5.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.5.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
|
||||
"model.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.5.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.5.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
|
||||
"model.layers.5.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.5.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
|
||||
"model.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.6.input_layernorm.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.6.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.6.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.6.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.6.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.6.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
|
||||
"model.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.6.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.6.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
|
||||
"model.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.6.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
|
||||
"model.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.7.input_layernorm.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.7.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.7.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.7.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.7.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.7.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
|
||||
"model.layers.7.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.7.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.7.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
|
||||
"model.layers.7.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.7.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
|
||||
"model.layers.7.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.8.input_layernorm.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.8.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.8.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.8.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.8.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.8.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
|
||||
"model.layers.8.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.8.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.8.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
|
||||
"model.layers.8.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.8.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
|
||||
"model.layers.8.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.9.input_layernorm.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.9.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.9.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.9.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.9.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.9.self_attn.k_proj.bias": "model-00001-of-00003.safetensors",
|
||||
"model.layers.9.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.9.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.9.self_attn.q_proj.bias": "model-00001-of-00003.safetensors",
|
||||
"model.layers.9.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.layers.9.self_attn.v_proj.bias": "model-00001-of-00003.safetensors",
|
||||
"model.layers.9.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
|
||||
"model.norm.weight": "model-00003-of-00003.safetensors"
|
||||
}
|
||||
}
|
||||
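The index metadata records 3,085,938,688 parameters and a total_size of 12,343,754,752 bytes, exactly 4 bytes per parameter and therefore consistent with the float32 dtype in config.json; the weight_map routes each tensor to one of the three shards. A sanity-check sketch (assumes the repo files are available locally):

import json
from collections import Counter

with open("model.safetensors.index.json") as f:
    index = json.load(f)

meta = index["metadata"]
assert meta["total_size"] == meta["total_parameters"] * 4   # 4 bytes per float32 parameter
print(Counter(index["weight_map"].values()))                # number of tensors per shard file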
special_tokens_map.json (new file, 31 lines)
@@ -0,0 +1,31 @@
{
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>",
    "<|object_ref_start|>",
    "<|object_ref_end|>",
    "<|box_start|>",
    "<|box_end|>",
    "<|quad_start|>",
    "<|quad_end|>",
    "<|vision_start|>",
    "<|vision_end|>",
    "<|vision_pad|>",
    "<|image_pad|>",
    "<|video_pad|>"
  ],
  "eos_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json (new file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9c5ae00e602b8860cbd784ba82a8aa14e8feecec692e7076590d014d7b7fdafa
size 11421896
tokenizer_config.json (new file, 208 lines)
@@ -0,0 +1,208 @@
|
||||
{
|
||||
"add_bos_token": false,
|
||||
"add_prefix_space": false,
|
||||
"added_tokens_decoder": {
|
||||
"151643": {
|
||||
"content": "<|endoftext|>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": true
|
||||
},
|
||||
"151644": {
|
||||
"content": "<|im_start|>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": true
|
||||
},
|
||||
"151645": {
|
||||
"content": "<|im_end|>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": true
|
||||
},
|
||||
"151646": {
|
||||
"content": "<|object_ref_start|>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": true
|
||||
},
|
||||
"151647": {
|
||||
"content": "<|object_ref_end|>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": true
|
||||
},
|
||||
"151648": {
|
||||
"content": "<|box_start|>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": true
|
||||
},
|
||||
"151649": {
|
||||
"content": "<|box_end|>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": true
|
||||
},
|
||||
"151650": {
|
||||
"content": "<|quad_start|>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": true
|
||||
},
|
||||
"151651": {
|
||||
"content": "<|quad_end|>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": true
|
||||
},
|
||||
"151652": {
|
||||
"content": "<|vision_start|>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": true
|
||||
},
|
||||
"151653": {
|
||||
"content": "<|vision_end|>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": true
|
||||
},
|
||||
"151654": {
|
||||
"content": "<|vision_pad|>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": true
|
||||
},
|
||||
"151655": {
|
||||
"content": "<|image_pad|>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": true
|
||||
},
|
||||
"151656": {
|
||||
"content": "<|video_pad|>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": true
|
||||
},
|
||||
"151657": {
|
||||
"content": "<tool_call>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": false
|
||||
},
|
||||
"151658": {
|
||||
"content": "</tool_call>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": false
|
||||
},
|
||||
"151659": {
|
||||
"content": "<|fim_prefix|>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": false
|
||||
},
|
||||
"151660": {
|
||||
"content": "<|fim_middle|>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": false
|
||||
},
|
||||
"151661": {
|
||||
"content": "<|fim_suffix|>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": false
|
||||
},
|
||||
"151662": {
|
||||
"content": "<|fim_pad|>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": false
|
||||
},
|
||||
"151663": {
|
||||
"content": "<|repo_name|>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": false
|
||||
},
|
||||
"151664": {
|
||||
"content": "<|file_sep|>",
|
||||
"lstrip": false,
|
||||
"normalized": false,
|
||||
"rstrip": false,
|
||||
"single_word": false,
|
||||
"special": false
|
||||
}
|
||||
},
|
||||
"additional_special_tokens": [
|
||||
"<|im_start|>",
|
||||
"<|im_end|>",
|
||||
"<|object_ref_start|>",
|
||||
"<|object_ref_end|>",
|
||||
"<|box_start|>",
|
||||
"<|box_end|>",
|
||||
"<|quad_start|>",
|
||||
"<|quad_end|>",
|
||||
"<|vision_start|>",
|
||||
"<|vision_end|>",
|
||||
"<|vision_pad|>",
|
||||
"<|image_pad|>",
|
||||
"<|video_pad|>"
|
||||
],
|
||||
"bos_token": null,
|
||||
"clean_up_tokenization_spaces": false,
|
||||
"eos_token": "<|endoftext|>",
|
||||
"errors": "replace",
|
||||
"extra_special_tokens": {},
|
||||
"model_max_length": 131072,
|
||||
"pad_token": "<|endoftext|>",
|
||||
"padding_side": "right",
|
||||
"split_special_tokens": false,
|
||||
"tokenizer_class": "Qwen2Tokenizer",
|
||||
"unk_token": null
|
||||
}
|
||||
train_results.json (new file, 8 lines)
@@ -0,0 +1,8 @@
{
  "epoch": 1.469850121993726,
  "total_flos": 5.7513841193385984e+17,
  "train_loss": 0.5876018210235169,
  "train_runtime": 8852.0342,
  "train_samples_per_second": 1.906,
  "train_steps_per_second": 0.238
}
trainer_log.jsonl (new file, 43 lines)
@@ -0,0 +1,43 @@
{"current_steps": 1, "total_steps": 2109, "loss": 1.1855, "lr": 0.0, "epoch": 0.0006971070059254096, "percentage": 0.05, "elapsed_time": "0:00:04", "remaining_time": "2:34:40"}
{"current_steps": 50, "total_steps": 2109, "loss": 0.9795, "lr": 1.9999434046461045e-05, "epoch": 0.03485535029627048, "percentage": 2.37, "elapsed_time": "0:02:51", "remaining_time": "1:57:56"}
{"current_steps": 100, "total_steps": 2109, "loss": 0.8103, "lr": 1.996249692618611e-05, "epoch": 0.06971070059254096, "percentage": 4.74, "elapsed_time": "0:05:42", "remaining_time": "1:54:49"}
{"current_steps": 150, "total_steps": 2109, "loss": 0.7672, "lr": 1.9868053167196865e-05, "epoch": 0.10456605088881143, "percentage": 7.11, "elapsed_time": "0:08:33", "remaining_time": "1:51:52"}
{"current_steps": 200, "total_steps": 2109, "loss": 0.7452, "lr": 1.971664792831919e-05, "epoch": 0.13942140118508192, "percentage": 9.48, "elapsed_time": "0:11:25", "remaining_time": "1:48:59"}
{"current_steps": 250, "total_steps": 2109, "loss": 0.7248, "lr": 1.9509155167802316e-05, "epoch": 0.17427675148135238, "percentage": 11.85, "elapsed_time": "0:14:16", "remaining_time": "1:46:06"}
{"current_steps": 300, "total_steps": 2109, "loss": 0.7107, "lr": 1.9246772598559302e-05, "epoch": 0.20913210177762287, "percentage": 14.22, "elapsed_time": "0:17:07", "remaining_time": "1:43:14"}
{"current_steps": 350, "total_steps": 2109, "loss": 0.7002, "lr": 1.8931014774594656e-05, "epoch": 0.24398745207389333, "percentage": 16.6, "elapsed_time": "0:19:58", "remaining_time": "1:40:22"}
{"current_steps": 400, "total_steps": 2109, "loss": 0.6954, "lr": 1.8563704348526337e-05, "epoch": 0.27884280237016384, "percentage": 18.97, "elapsed_time": "0:22:49", "remaining_time": "1:37:31"}
{"current_steps": 450, "total_steps": 2109, "loss": 0.6895, "lr": 1.8146961550666525e-05, "epoch": 0.3136981526664343, "percentage": 21.34, "elapsed_time": "0:25:40", "remaining_time": "1:34:40"}
{"current_steps": 500, "total_steps": 2109, "loss": 0.6779, "lr": 1.7683191950391142e-05, "epoch": 0.34855350296270476, "percentage": 23.71, "elapsed_time": "0:28:32", "remaining_time": "1:31:49"}
{"current_steps": 550, "total_steps": 2109, "loss": 0.6669, "lr": 1.717507257044331e-05, "epoch": 0.3834088532589753, "percentage": 26.08, "elapsed_time": "0:33:24", "remaining_time": "1:34:43"}
{"current_steps": 600, "total_steps": 2109, "loss": 0.663, "lr": 1.6625536434323358e-05, "epoch": 0.41826420355524574, "percentage": 28.45, "elapsed_time": "0:36:16", "remaining_time": "1:31:13"}
|
||||
{"current_steps": 650, "total_steps": 2109, "loss": 0.6581, "lr": 1.6037755635962587e-05, "epoch": 0.4531195538515162, "percentage": 30.82, "elapsed_time": "0:39:07", "remaining_time": "1:27:50"}
|
||||
{"current_steps": 700, "total_steps": 2109, "loss": 0.6487, "lr": 1.5415123029408046e-05, "epoch": 0.48797490414778666, "percentage": 33.19, "elapsed_time": "0:41:59", "remaining_time": "1:24:30"}
|
||||
{"current_steps": 750, "total_steps": 2109, "loss": 0.645, "lr": 1.4761232644210963e-05, "epoch": 0.5228302544440572, "percentage": 35.56, "elapsed_time": "0:44:50", "remaining_time": "1:21:15"}
|
||||
{"current_steps": 800, "total_steps": 2109, "loss": 0.6366, "lr": 1.4079858939567557e-05, "epoch": 0.5576856047403277, "percentage": 37.93, "elapsed_time": "0:47:41", "remaining_time": "1:18:02"}
|
||||
{"current_steps": 850, "total_steps": 2109, "loss": 0.6314, "lr": 1.3374935016963595e-05, "epoch": 0.5925409550365981, "percentage": 40.3, "elapsed_time": "0:50:33", "remaining_time": "1:14:52"}
|
||||
{"current_steps": 900, "total_steps": 2109, "loss": 0.6287, "lr": 1.2650529917086232e-05, "epoch": 0.6273963053328686, "percentage": 42.67, "elapsed_time": "0:53:24", "remaining_time": "1:11:44"}
|
||||
{"current_steps": 950, "total_steps": 2109, "loss": 0.6233, "lr": 1.1910825132052356e-05, "epoch": 0.6622516556291391, "percentage": 45.05, "elapsed_time": "0:56:15", "remaining_time": "1:08:38"}
|
||||
{"current_steps": 1000, "total_steps": 2109, "loss": 0.6255, "lr": 1.1160090468532266e-05, "epoch": 0.6971070059254095, "percentage": 47.42, "elapsed_time": "0:59:07", "remaining_time": "1:05:33"}
|
||||
{"current_steps": 1050, "total_steps": 2109, "loss": 0.6141, "lr": 1.0402659401094154e-05, "epoch": 0.73196235622168, "percentage": 49.79, "elapsed_time": "1:04:04", "remaining_time": "1:04:37"}
|
||||
{"current_steps": 1100, "total_steps": 2109, "loss": 0.6095, "lr": 9.642904058037667e-06, "epoch": 0.7668177065179506, "percentage": 52.16, "elapsed_time": "1:06:56", "remaining_time": "1:01:23"}
|
||||
{"current_steps": 1150, "total_steps": 2109, "loss": 0.6027, "lr": 8.885209984106072e-06, "epoch": 0.801673056814221, "percentage": 54.53, "elapsed_time": "1:09:47", "remaining_time": "0:58:12"}
|
||||
{"current_steps": 1200, "total_steps": 2109, "loss": 0.6068, "lr": 8.133950825754511e-06, "epoch": 0.8365284071104915, "percentage": 56.9, "elapsed_time": "1:12:39", "remaining_time": "0:55:02"}
|
||||
{"current_steps": 1250, "total_steps": 2109, "loss": 0.6012, "lr": 7.393463085098886e-06, "epoch": 0.8713837574067619, "percentage": 59.27, "elapsed_time": "1:15:31", "remaining_time": "0:51:53"}
|
||||
{"current_steps": 1300, "total_steps": 2109, "loss": 0.6005, "lr": 6.6680210882734805e-06, "epoch": 0.9062391077030324, "percentage": 61.64, "elapsed_time": "1:18:22", "remaining_time": "0:48:46"}
|
||||
{"current_steps": 1350, "total_steps": 2109, "loss": 0.594, "lr": 5.961812312687689e-06, "epoch": 0.9410944579993029, "percentage": 64.01, "elapsed_time": "1:21:13", "remaining_time": "0:45:40"}
|
||||
{"current_steps": 1400, "total_steps": 2109, "loss": 0.5852, "lr": 5.278913215600714e-06, "epoch": 0.9759498082955733, "percentage": 66.38, "elapsed_time": "1:24:05", "remaining_time": "0:42:35"}
|
||||
{"current_steps": 1450, "total_steps": 2109, "loss": 0.5385, "lr": 4.623265703539146e-06, "epoch": 1.0104566050888812, "percentage": 68.75, "elapsed_time": "1:26:55", "remaining_time": "0:39:30"}
|
||||
{"current_steps": 1500, "total_steps": 2109, "loss": 0.4257, "lr": 3.998654378383361e-06, "epoch": 1.0453119553851515, "percentage": 71.12, "elapsed_time": "1:29:46", "remaining_time": "0:36:26"}
|
||||
{"current_steps": 1550, "total_steps": 2109, "loss": 0.4253, "lr": 3.408684691465355e-06, "epoch": 1.080167305681422, "percentage": 73.49, "elapsed_time": "1:44:43", "remaining_time": "0:37:45"}
|
||||
{"current_steps": 1600, "total_steps": 2109, "loss": 0.4221, "lr": 2.85676213177945e-06, "epoch": 1.1150226559776926, "percentage": 75.87, "elapsed_time": "1:47:34", "remaining_time": "0:34:13"}
|
||||
{"current_steps": 1650, "total_steps": 2109, "loss": 0.4244, "lr": 2.3460725684379002e-06, "epoch": 1.149878006273963, "percentage": 78.24, "elapsed_time": "1:50:25", "remaining_time": "0:30:43"}
|
||||
{"current_steps": 1700, "total_steps": 2109, "loss": 0.4162, "lr": 1.8795638608410016e-06, "epoch": 1.1847333565702336, "percentage": 80.61, "elapsed_time": "1:53:17", "remaining_time": "0:27:15"}
|
||||
{"current_steps": 1750, "total_steps": 2109, "loss": 0.4189, "lr": 1.4599288427134283e-06, "epoch": 1.219588706866504, "percentage": 82.98, "elapsed_time": "1:56:08", "remaining_time": "0:23:49"}
|
||||
{"current_steps": 1800, "total_steps": 2109, "loss": 0.4181, "lr": 1.0895897782283305e-06, "epoch": 1.2544440571627744, "percentage": 85.35, "elapsed_time": "1:58:59", "remaining_time": "0:20:25"}
|
||||
{"current_steps": 1850, "total_steps": 2109, "loss": 0.4166, "lr": 7.706843799431985e-07, "epoch": 1.289299407459045, "percentage": 87.72, "elapsed_time": "2:01:51", "remaining_time": "0:17:03"}
|
||||
{"current_steps": 1900, "total_steps": 2109, "loss": 0.4125, "lr": 5.050534692564358e-07, "epoch": 1.3241547577553154, "percentage": 90.09, "elapsed_time": "2:04:42", "remaining_time": "0:13:43"}
|
||||
{"current_steps": 1950, "total_steps": 2109, "loss": 0.4089, "lr": 2.94230350612239e-07, "epoch": 1.359010108051586, "percentage": 92.46, "elapsed_time": "2:07:33", "remaining_time": "0:10:24"}
|
||||
{"current_steps": 2000, "total_steps": 2109, "loss": 0.4135, "lr": 1.3943196078924247e-07, "epoch": 1.3938654583478565, "percentage": 94.83, "elapsed_time": "2:10:24", "remaining_time": "0:07:06"}
|
||||
{"current_steps": 2050, "total_steps": 2109, "loss": 0.4138, "lr": 4.155184436196669e-08, "epoch": 1.428720808644127, "percentage": 97.2, "elapsed_time": "2:22:02", "remaining_time": "0:04:05"}
|
||||
{"current_steps": 2100, "total_steps": 2109, "loss": 0.4112, "lr": 1.154995882924892e-09, "epoch": 1.4635761589403973, "percentage": 99.57, "elapsed_time": "2:24:54", "remaining_time": "0:00:37"}
|
||||
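This commit also adds training_loss.png; a curve like it can be reconstructed from trainer_log.jsonl, which records the loss every 50 steps. The sketch below is illustrative only: the input file name matches this commit, but the plotting choices and the output file name are assumptions.

```python
# Sketch: re-plot training loss vs. optimizer step from trainer_log.jsonl.
# Assumes the file is in the current working directory (e.g. a clone of this repo).
import json

import matplotlib.pyplot as plt

steps, losses = [], []
with open("trainer_log.jsonl", encoding="utf-8") as f:
    for line in f:
        record = json.loads(line)
        if "loss" in record:  # skip any records without a loss field
            steps.append(record["current_steps"])
            losses.append(record["loss"])

plt.plot(steps, losses)
plt.xlabel("step")
plt.ylabel("training loss")
plt.title("qwen2.5-3B-cb-1_0")
# Saved under a different name so the committed training_loss.png is not overwritten.
plt.savefig("training_loss_replot.png")
```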
344
trainer_state.json
Normal file
@@ -0,0 +1,344 @@
{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.469850121993726,
"eval_steps": 500,
"global_step": 2109,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0006971070059254096,
"grad_norm": 2.962728261947632,
"learning_rate": 0.0,
"loss": 1.1855,
"step": 1
},
{
"epoch": 0.03485535029627048,
"grad_norm": 1.953801155090332,
"learning_rate": 1.9999434046461045e-05,
"loss": 0.9795,
"step": 50
},
{
"epoch": 0.06971070059254096,
"grad_norm": 1.6633161306381226,
"learning_rate": 1.996249692618611e-05,
"loss": 0.8103,
"step": 100
},
{
"epoch": 0.10456605088881143,
"grad_norm": 1.8373701572418213,
"learning_rate": 1.9868053167196865e-05,
"loss": 0.7672,
"step": 150
},
{
"epoch": 0.13942140118508192,
"grad_norm": 1.503631830215454,
"learning_rate": 1.971664792831919e-05,
"loss": 0.7452,
"step": 200
},
{
"epoch": 0.17427675148135238,
"grad_norm": 1.5163464546203613,
"learning_rate": 1.9509155167802316e-05,
"loss": 0.7248,
"step": 250
},
{
"epoch": 0.20913210177762287,
"grad_norm": 1.4677717685699463,
"learning_rate": 1.9246772598559302e-05,
"loss": 0.7107,
"step": 300
},
{
"epoch": 0.24398745207389333,
"grad_norm": 1.4698961973190308,
"learning_rate": 1.8931014774594656e-05,
"loss": 0.7002,
"step": 350
},
{
"epoch": 0.27884280237016384,
"grad_norm": 1.5015199184417725,
"learning_rate": 1.8563704348526337e-05,
"loss": 0.6954,
"step": 400
},
{
"epoch": 0.3136981526664343,
"grad_norm": 1.4707640409469604,
"learning_rate": 1.8146961550666525e-05,
"loss": 0.6895,
"step": 450
},
{
"epoch": 0.34855350296270476,
"grad_norm": 1.4305927753448486,
"learning_rate": 1.7683191950391142e-05,
"loss": 0.6779,
"step": 500
},
{
"epoch": 0.3834088532589753,
"grad_norm": 1.4896072149276733,
"learning_rate": 1.717507257044331e-05,
"loss": 0.6669,
"step": 550
},
{
"epoch": 0.41826420355524574,
"grad_norm": 1.4289474487304688,
"learning_rate": 1.6625536434323358e-05,
"loss": 0.663,
"step": 600
},
{
"epoch": 0.4531195538515162,
"grad_norm": 1.3936972618103027,
"learning_rate": 1.6037755635962587e-05,
"loss": 0.6581,
"step": 650
},
{
"epoch": 0.48797490414778666,
"grad_norm": 1.4336333274841309,
"learning_rate": 1.5415123029408046e-05,
"loss": 0.6487,
"step": 700
},
{
"epoch": 0.5228302544440572,
"grad_norm": 1.4300156831741333,
"learning_rate": 1.4761232644210963e-05,
"loss": 0.645,
"step": 750
},
{
"epoch": 0.5576856047403277,
"grad_norm": 1.3896256685256958,
"learning_rate": 1.4079858939567557e-05,
"loss": 0.6366,
"step": 800
},
{
"epoch": 0.5925409550365981,
"grad_norm": 1.2966829538345337,
"learning_rate": 1.3374935016963595e-05,
"loss": 0.6314,
"step": 850
},
{
"epoch": 0.6273963053328686,
"grad_norm": 1.304624319076538,
"learning_rate": 1.2650529917086232e-05,
"loss": 0.6287,
"step": 900
},
{
"epoch": 0.6622516556291391,
"grad_norm": 1.3826805353164673,
"learning_rate": 1.1910825132052356e-05,
"loss": 0.6233,
"step": 950
},
{
"epoch": 0.6971070059254095,
"grad_norm": 1.3912091255187988,
"learning_rate": 1.1160090468532266e-05,
"loss": 0.6255,
"step": 1000
},
{
"epoch": 0.73196235622168,
"grad_norm": 1.2622112035751343,
"learning_rate": 1.0402659401094154e-05,
"loss": 0.6141,
"step": 1050
},
{
"epoch": 0.7668177065179506,
"grad_norm": 1.3311121463775635,
"learning_rate": 9.642904058037667e-06,
"loss": 0.6095,
"step": 1100
},
{
"epoch": 0.801673056814221,
"grad_norm": 1.393316626548767,
"learning_rate": 8.885209984106072e-06,
"loss": 0.6027,
"step": 1150
},
{
"epoch": 0.8365284071104915,
"grad_norm": 1.3410212993621826,
"learning_rate": 8.133950825754511e-06,
"loss": 0.6068,
"step": 1200
},
{
"epoch": 0.8713837574067619,
"grad_norm": 1.365545392036438,
"learning_rate": 7.393463085098886e-06,
"loss": 0.6012,
"step": 1250
},
{
"epoch": 0.9062391077030324,
"grad_norm": 1.3484421968460083,
"learning_rate": 6.6680210882734805e-06,
"loss": 0.6005,
"step": 1300
},
{
"epoch": 0.9410944579993029,
"grad_norm": 1.7153867483139038,
"learning_rate": 5.961812312687689e-06,
"loss": 0.594,
"step": 1350
},
{
"epoch": 0.9759498082955733,
"grad_norm": 1.308719515800476,
"learning_rate": 5.278913215600714e-06,
"loss": 0.5852,
"step": 1400
},
{
"epoch": 1.0104566050888812,
"grad_norm": 1.4866747856140137,
"learning_rate": 4.623265703539146e-06,
"loss": 0.5385,
"step": 1450
},
{
"epoch": 1.0453119553851515,
"grad_norm": 1.4509928226470947,
"learning_rate": 3.998654378383361e-06,
"loss": 0.4257,
"step": 1500
},
{
"epoch": 1.080167305681422,
"grad_norm": 1.2705004215240479,
"learning_rate": 3.408684691465355e-06,
"loss": 0.4253,
"step": 1550
},
{
"epoch": 1.1150226559776926,
"grad_norm": 1.35167396068573,
"learning_rate": 2.85676213177945e-06,
"loss": 0.4221,
"step": 1600
},
{
"epoch": 1.149878006273963,
"grad_norm": 1.313473105430603,
"learning_rate": 2.3460725684379002e-06,
"loss": 0.4244,
"step": 1650
},
{
"epoch": 1.1847333565702336,
"grad_norm": 1.3406174182891846,
"learning_rate": 1.8795638608410016e-06,
"loss": 0.4162,
"step": 1700
},
{
"epoch": 1.219588706866504,
"grad_norm": 1.263272762298584,
"learning_rate": 1.4599288427134283e-06,
"loss": 0.4189,
"step": 1750
},
{
"epoch": 1.2544440571627744,
"grad_norm": 1.3713723421096802,
"learning_rate": 1.0895897782283305e-06,
"loss": 0.4181,
"step": 1800
},
{
"epoch": 1.289299407459045,
"grad_norm": 1.3794862031936646,
"learning_rate": 7.706843799431985e-07,
"loss": 0.4166,
"step": 1850
},
{
"epoch": 1.3241547577553154,
"grad_norm": 1.3196351528167725,
"learning_rate": 5.050534692564358e-07,
"loss": 0.4125,
"step": 1900
},
{
"epoch": 1.359010108051586,
"grad_norm": 1.3097676038742065,
"learning_rate": 2.94230350612239e-07,
"loss": 0.4089,
"step": 1950
},
{
"epoch": 1.3938654583478565,
"grad_norm": 1.329222559928894,
"learning_rate": 1.3943196078924247e-07,
"loss": 0.4135,
"step": 2000
},
{
"epoch": 1.428720808644127,
"grad_norm": 1.3468352556228638,
"learning_rate": 4.155184436196669e-08,
"loss": 0.4138,
"step": 2050
},
{
"epoch": 1.4635761589403973,
"grad_norm": 1.3396908044815063,
"learning_rate": 1.154995882924892e-09,
"loss": 0.4112,
"step": 2100
},
{
"epoch": 1.469850121993726,
"step": 2109,
"total_flos": 5.7513841193385984e+17,
"train_loss": 0.5876018210235169,
"train_runtime": 8852.0342,
"train_samples_per_second": 1.906,
"train_steps_per_second": 0.238
}
],
"logging_steps": 50,
"max_steps": 2109,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.7513841193385984e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
3
training_args.bin
Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d1ceaad6664768e2095f3a8597de77b34f5991d49135917611b50fb2224913bd
size 6289
BIN
training_loss.png
Normal file
Binary file not shown.
Size: 38 KiB
1
vocab.json
Normal file
File diff suppressed because one or more lines are too long