Initialize the project; model provided by the ModelHub XC community
Model: aasim-m/daft-qwen2.5-coder-3b-instruct-full Source: Original Platform
36  .gitattributes  vendored  Normal file
@@ -0,0 +1,36 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
tokenizer.json filter=lfs diff=lfs merge=lfs -text
61  README.md  Normal file
@@ -0,0 +1,61 @@
---
library_name: transformers
license: other
base_model: Qwen/Qwen2.5-Coder-3B-Instruct
tags:
- llama-factory
- full
- generated_from_trainer
model-index:
- name: sft
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# sft

This model is a fine-tuned version of [Qwen/Qwen2.5-Coder-3B-Instruct](https://huggingface.co/Qwen/Qwen2.5-Coder-3B-Instruct) on the daft_functions_dedup_sharegpt dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 1e-05
- train_batch_size: 4
- eval_batch_size: 8
- seed: 42
- distributed_type: multi-GPU
- num_devices: 4
- gradient_accumulation_steps: 32
- total_train_batch_size: 512
- total_eval_batch_size: 32
- optimizer: adamw_torch_fused (betas=(0.9, 0.999), epsilon=1e-08, no additional optimizer arguments)
- lr_scheduler_type: cosine
- lr_scheduler_warmup_steps: 0.1
- num_epochs: 3.0

### Training results

### Framework versions

- Transformers 5.2.0
- Pytorch 2.8.0+cu128
- Datasets 4.0.0
- Tokenizers 0.22.2
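Note: the card above has no usage section, so here is a minimal loading sketch, not part of the commit. It assumes the repo id `aasim-m/daft-qwen2.5-coder-3b-instruct-full` is resolvable from your hub mirror or a local path, and the prompt text is purely illustrative; sampling values mirror `generation_config.json` further down.

```python
# Minimal sketch: load the fine-tuned checkpoint and run one chat turn.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "aasim-m/daft-qwen2.5-coder-3b-instruct-full"  # assumption: hub-style id or local path
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, dtype="auto", device_map="auto"  # older transformers releases use torch_dtype= instead of dtype=
)

messages = [{"role": "user", "content": "Write a Daft expression that filters rows where score > 0.5."}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

# Sampling values follow generation_config.json (temperature 0.7, top_p 0.8, top_k 20).
outputs = model.generate(inputs, max_new_tokens=256, do_sample=True, temperature=0.7, top_p=0.8, top_k=20)
print(tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True))
```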
8  all_results.json  Normal file
@@ -0,0 +1,8 @@
{
  "epoch": 3.0,
  "total_flos": 3081875480379392.0,
  "train_loss": 0.06056562058377327,
  "train_runtime": 29609.547,
  "train_samples_per_second": 6.685,
  "train_steps_per_second": 0.013
}
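Note: a quick back-of-the-envelope consistency check of these numbers against the hyperparameters in the card (my arithmetic, not stored anywhere in the repo):

```python
# Sanity check, assuming the values in all_results.json and the README hyperparameters above.
per_device_batch, num_gpus, grad_accum = 4, 4, 32
effective_batch = per_device_batch * num_gpus * grad_accum   # 512, matches total_train_batch_size
samples_seen = 29609.547 * 6.685                              # ~197,940 samples over the whole run
examples_per_epoch = samples_seen / 3.0                       # ~65,980, consistent with 387 steps * 512 / 3 epochs
print(effective_batch, round(samples_seen), round(examples_per_epoch))
```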
54  chat_template.jinja  Normal file
@@ -0,0 +1,54 @@
{%- if tools %}
    {{- '<|im_start|>system\n' }}
    {%- if messages[0]['role'] == 'system' %}
        {{- messages[0]['content'] }}
    {%- else %}
        {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}
    {%- endif %}
    {{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
    {%- for tool in tools %}
        {{- "\n" }}
        {{- tool | tojson }}
    {%- endfor %}
    {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
{%- else %}
    {%- if messages[0]['role'] == 'system' %}
        {{- '<|im_start|>system\n' + messages[0]['content'] + '<|im_end|>\n' }}
    {%- else %}
        {{- '<|im_start|>system\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\n' }}
    {%- endif %}
{%- endif %}
{%- for message in messages %}
    {%- if (message.role == "user") or (message.role == "system" and not loop.first) or (message.role == "assistant" and not message.tool_calls) %}
        {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
    {%- elif message.role == "assistant" %}
        {{- '<|im_start|>' + message.role }}
        {%- if message.content %}
            {{- '\n' + message.content }}
        {%- endif %}
        {%- for tool_call in message.tool_calls %}
            {%- if tool_call.function is defined %}
                {%- set tool_call = tool_call.function %}
            {%- endif %}
            {{- '\n<tool_call>\n{"name": "' }}
            {{- tool_call.name }}
            {{- '", "arguments": ' }}
            {{- tool_call.arguments | tojson }}
            {{- '}\n</tool_call>' }}
        {%- endfor %}
        {{- '<|im_end|>\n' }}
    {%- elif message.role == "tool" %}
        {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %}
            {{- '<|im_start|>user' }}
        {%- endif %}
        {{- '\n<tool_response>\n' }}
        {{- message.content }}
        {{- '\n</tool_response>' }}
        {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
            {{- '<|im_end|>\n' }}
        {%- endif %}
    {%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
    {{- '<|im_start|>assistant\n' }}
{%- endif %}
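Note: a minimal sketch of how this template is exercised through the tokenizer's chat-template API. The tool schema and prompt below are purely illustrative assumptions, not part of the repo.

```python
# Sketch: render the template above with one (hypothetical) tool definition.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("aasim-m/daft-qwen2.5-coder-3b-instruct-full")

tools = [{
    "type": "function",
    "function": {
        "name": "run_daft_query",  # illustrative name only
        "description": "Execute a Daft dataframe expression and return the result.",
        "parameters": {
            "type": "object",
            "properties": {"expression": {"type": "string"}},
            "required": ["expression"],
        },
    },
}]
messages = [{"role": "user", "content": "Filter rows where score > 0.5."}]

# Produces the <|im_start|>system ... <tools> ... <tool_call> scaffolding defined by the template above.
prompt = tokenizer.apply_chat_template(
    messages, tools=tools, add_generation_prompt=True, tokenize=False
)
print(prompt)
```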
69  config.json  Normal file
@@ -0,0 +1,69 @@
{
  "architectures": [
    "Qwen2ForCausalLM"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": null,
  "dtype": "bfloat16",
  "eos_token_id": 151645,
  "hidden_act": "silu",
  "hidden_size": 2048,
  "initializer_range": 0.02,
  "intermediate_size": 11008,
  "layer_types": [
    "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention",
    "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention",
    "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention",
    "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention",
    "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention",
    "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention"
  ],
  "max_position_embeddings": 32768,
  "max_window_layers": 36,
  "model_type": "qwen2",
  "num_attention_heads": 16,
  "num_hidden_layers": 36,
  "num_key_value_heads": 2,
  "pad_token_id": 151643,
  "rms_norm_eps": 1e-06,
  "rope_parameters": {
    "rope_theta": 1000000.0,
    "rope_type": "default"
  },
  "sliding_window": null,
  "tie_word_embeddings": true,
  "transformers_version": "5.2.0",
  "use_cache": true,
  "use_sliding_window": false,
  "vocab_size": 151936
}
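Note: the config describes a 36-layer Qwen2 model with grouped-query attention (16 query heads sharing 2 KV heads). A small sketch for inspecting it without downloading the weights; the derived numbers are computed from the fields above, not stored in the file.

```python
# Sketch: inspect the architecture from config.json alone (no weights needed).
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("aasim-m/daft-qwen2.5-coder-3b-instruct-full")
head_dim = cfg.hidden_size // cfg.num_attention_heads            # 2048 / 16 = 128
gqa_ratio = cfg.num_attention_heads // cfg.num_key_value_heads   # 16 / 2 = 8 query heads per KV head
print(cfg.num_hidden_layers, head_dim, gqa_ratio, cfg.vocab_size)  # 36 128 8 151936
```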
13  generation_config.json  Normal file
@@ -0,0 +1,13 @@
{
  "do_sample": true,
  "eos_token_id": [
    151645,
    151643
  ],
  "pad_token_id": 151643,
  "repetition_penalty": 1.05,
  "temperature": 0.7,
  "top_k": 20,
  "top_p": 0.8,
  "transformers_version": "5.2.0"
}
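Note: these values become the model's default sampling settings; `model.generate()` picks them up automatically when no overrides are passed. A brief sketch of reading them explicitly:

```python
# Sketch: the file above is loaded as the default GenerationConfig for this checkpoint.
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("aasim-m/daft-qwen2.5-coder-3b-instruct-full")
print(gen_cfg.temperature, gen_cfg.top_p, gen_cfg.top_k, gen_cfg.repetition_penalty)  # 0.7 0.8 20 1.05
```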
3  model.safetensors  Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8b3a0782298f3bc5526de50d9ee1bba89a44248fe431ffc888ea86d6ed217618
size 6171927112
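Note: this is a Git LFS pointer, not the weights themselves; the real file is roughly 6.2 GB. A sketch for verifying a fetched copy against the pointer's hash and size (assumes the file has already been pulled with `git lfs`):

```python
# Sketch: confirm a downloaded model.safetensors matches the LFS pointer above.
import hashlib
from pathlib import Path

path = Path("model.safetensors")  # assumption: already fetched into the working tree
h = hashlib.sha256()
with path.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks to avoid loading 6 GB at once
        h.update(chunk)

print(path.stat().st_size == 6171927112)
print(h.hexdigest() == "8b3a0782298f3bc5526de50d9ee1bba89a44248fe431ffc888ea86d6ed217618")
```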
3  tokenizer.json  Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3fd169731d2cbde95e10bf356d66d5997fd885dd8dbb6fb4684da3f23b2585d8
size 11421892
30  tokenizer_config.json  Normal file
@@ -0,0 +1,30 @@
{
  "add_prefix_space": false,
  "backend": "tokenizers",
  "bos_token": null,
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|im_end|>",
  "errors": "replace",
  "extra_special_tokens": {
    "<|im_start|>": "<|im_start|>",
    "<|im_end|>": "<|im_end|>",
    "<|object_ref_start|>": "<|object_ref_start|>",
    "<|object_ref_end|>": "<|object_ref_end|>",
    "<|box_start|>": "<|box_start|>",
    "<|box_end|>": "<|box_end|>",
    "<|quad_start|>": "<|quad_start|>",
    "<|quad_end|>": "<|quad_end|>",
    "<|vision_start|>": "<|vision_start|>",
    "<|vision_end|>": "<|vision_end|>",
    "<|vision_pad|>": "<|vision_pad|>",
    "<|image_pad|>": "<|image_pad|>",
    "<|video_pad|>": "<|video_pad|>"
  },
  "is_local": false,
  "model_max_length": 32768,
  "pad_token": "<|endoftext|>",
  "padding_side": "right",
  "split_special_tokens": false,
  "tokenizer_class": "Qwen2Tokenizer",
  "unk_token": null
}
8  train_results.json  Normal file
@@ -0,0 +1,8 @@
{
  "epoch": 3.0,
  "total_flos": 3081875480379392.0,
  "train_loss": 0.06056562058377327,
  "train_runtime": 29609.547,
  "train_samples_per_second": 6.685,
  "train_steps_per_second": 0.013
}
27  trainer_log.jsonl  Normal file
@@ -0,0 +1,27 @@
{"current_steps": 130, "total_steps": 387, "loss": 0.11126101016998291, "lr": 8.438497294267117e-06, "epoch": 1.0077594568380213, "percentage": 33.59, "elapsed_time": "0:18:26", "remaining_time": "0:36:26"}
{"current_steps": 140, "total_steps": 387, "loss": 0.10776399374008179, "lr": 8.097241806078616e-06, "epoch": 1.0853540252182348, "percentage": 36.18, "elapsed_time": "0:36:50", "remaining_time": "1:04:59"}
{"current_steps": 150, "total_steps": 387, "loss": 0.10431833267211914, "lr": 7.730761885468486e-06, "epoch": 1.162948593598448, "percentage": 38.76, "elapsed_time": "0:55:23", "remaining_time": "1:27:31"}
{"current_steps": 160, "total_steps": 387, "loss": 0.10304663181304932, "lr": 7.342042203498952e-06, "epoch": 1.2405431619786615, "percentage": 41.34, "elapsed_time": "1:14:02", "remaining_time": "1:45:03"}
{"current_steps": 170, "total_steps": 387, "loss": 0.09784629344940185, "lr": 6.934248555404197e-06, "epoch": 1.3181377303588748, "percentage": 43.93, "elapsed_time": "1:32:53", "remaining_time": "1:58:33"}
{"current_steps": 180, "total_steps": 387, "loss": 0.09537227749824524, "lr": 6.510702077847864e-06, "epoch": 1.3957322987390883, "percentage": 46.51, "elapsed_time": "1:51:08", "remaining_time": "2:07:48"}
{"current_steps": 190, "total_steps": 387, "loss": 0.09520423412322998, "lr": 6.074852201055121e-06, "epoch": 1.4733268671193016, "percentage": 49.1, "elapsed_time": "2:09:17", "remaining_time": "2:14:03"}
{"current_steps": 200, "total_steps": 387, "loss": 0.09088362455368042, "lr": 5.630248556101448e-06, "epoch": 1.5509214354995149, "percentage": 51.68, "elapsed_time": "2:27:46", "remaining_time": "2:18:10"}
{"current_steps": 210, "total_steps": 387, "loss": 0.0899280071258545, "lr": 5.180512066149682e-06, "epoch": 1.6285160038797284, "percentage": 54.26, "elapsed_time": "2:46:36", "remaining_time": "2:20:25"}
{"current_steps": 220, "total_steps": 387, "loss": 0.0881616234779358, "lr": 4.729305457072913e-06, "epoch": 1.706110572259942, "percentage": 56.85, "elapsed_time": "3:04:52", "remaining_time": "2:20:20"}
{"current_steps": 230, "total_steps": 387, "loss": 0.08638249635696411, "lr": 4.280303427629404e-06, "epoch": 1.7837051406401552, "percentage": 59.43, "elapsed_time": "3:23:30", "remaining_time": "2:18:54"}
{"current_steps": 240, "total_steps": 387, "loss": 0.08716154098510742, "lr": 3.8371627221284495e-06, "epoch": 1.8612997090203685, "percentage": 62.02, "elapsed_time": "3:41:53", "remaining_time": "2:15:54"}
{"current_steps": 250, "total_steps": 387, "loss": 0.08580605983734131, "lr": 3.403492349320101e-06, "epoch": 1.938894277400582, "percentage": 64.6, "elapsed_time": "4:00:25", "remaining_time": "2:11:44"}
{"current_steps": 260, "total_steps": 387, "loss": 0.08316840529441834, "lr": 2.982824190050958e-06, "epoch": 2.0155189136760425, "percentage": 67.18, "elapsed_time": "4:18:27", "remaining_time": "2:06:14"}
{"current_steps": 270, "total_steps": 387, "loss": 0.08091338872909545, "lr": 2.5785842330619038e-06, "epoch": 2.093113482056256, "percentage": 69.77, "elapsed_time": "4:37:00", "remaining_time": "2:00:02"}
{"current_steps": 280, "total_steps": 387, "loss": 0.08085420131683349, "lr": 2.1940646731880887e-06, "epoch": 2.1707080504364695, "percentage": 72.35, "elapsed_time": "4:55:04", "remaining_time": "1:52:45"}
{"current_steps": 290, "total_steps": 387, "loss": 0.08156624436378479, "lr": 1.8323970991978823e-06, "epoch": 2.248302618816683, "percentage": 74.94, "elapsed_time": "5:14:09", "remaining_time": "1:45:04"}
{"current_steps": 300, "total_steps": 387, "loss": 0.0808843195438385, "lr": 1.4965269896332884e-06, "epoch": 2.325897187196896, "percentage": 77.52, "elapsed_time": "5:32:45", "remaining_time": "1:36:30"}
{"current_steps": 310, "total_steps": 387, "loss": 0.07979943156242371, "lr": 1.1891897243618184e-06, "epoch": 2.4034917555771096, "percentage": 80.1, "elapsed_time": "5:50:50", "remaining_time": "1:27:08"}
{"current_steps": 320, "total_steps": 387, "loss": 0.08049517869949341, "lr": 9.128883072055411e-07, "epoch": 2.481086323957323, "percentage": 82.69, "elapsed_time": "6:09:14", "remaining_time": "1:17:18"}
{"current_steps": 330, "total_steps": 387, "loss": 0.08011389374732972, "lr": 6.698729810778065e-07, "epoch": 2.558680892337536, "percentage": 85.27, "elapsed_time": "6:27:50", "remaining_time": "1:06:59"}
{"current_steps": 340, "total_steps": 387, "loss": 0.08163015246391296, "lr": 4.6212290164521554e-07, "epoch": 2.6362754607177497, "percentage": 87.86, "elapsed_time": "6:46:09", "remaining_time": "0:56:08"}
{"current_steps": 350, "total_steps": 387, "loss": 0.08051948547363282, "lr": 2.9133001876746004e-07, "epoch": 2.713870029097963, "percentage": 90.44, "elapsed_time": "7:04:35", "remaining_time": "0:44:53"}
{"current_steps": 360, "total_steps": 387, "loss": 0.07719261646270752, "lr": 1.5888529698718347e-07, "epoch": 2.7914645974781767, "percentage": 93.02, "elapsed_time": "7:23:07", "remaining_time": "0:33:14"}
{"current_steps": 370, "total_steps": 387, "loss": 0.08128957152366638, "lr": 6.58673872923693e-08, "epoch": 2.86905916585839, "percentage": 95.61, "elapsed_time": "7:42:08", "remaining_time": "0:21:14"}
{"current_steps": 380, "total_steps": 387, "loss": 0.07743191719055176, "lr": 1.3033842410251074e-08, "epoch": 2.946653734238603, "percentage": 98.19, "elapsed_time": "8:00:28", "remaining_time": "0:08:51"}
{"current_steps": 387, "total_steps": 387, "epoch": 3.0, "percentage": 100.0, "elapsed_time": "8:13:28", "remaining_time": "0:00:00"}
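Note: a small sketch for turning this log into a loss curve like `training_loss.png` below; the plotting choices and output filename are mine, not taken from the repo.

```python
# Sketch: plot training loss from trainer_log.jsonl.
import json
import matplotlib.pyplot as plt

steps, losses = [], []
with open("trainer_log.jsonl") as f:
    for line in f:
        rec = json.loads(line)
        if "loss" in rec:  # the final summary line carries no loss field
            steps.append(rec["current_steps"])
            losses.append(rec["loss"])

plt.plot(steps, losses)
plt.xlabel("step")
plt.ylabel("training loss")
plt.savefig("training_loss_replot.png")  # hypothetical output name
```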
309  trainer_state.json  Normal file
@@ -0,0 +1,309 @@
{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 387,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {"epoch": 0.07759456838021339, "grad_norm": 1.9683642394428182, "learning_rate": 2.307692307692308e-06, "loss": 0.7343237400054932, "step": 10},
    {"epoch": 0.15518913676042678, "grad_norm": 1.4175428237350762, "learning_rate": 4.871794871794872e-06, "loss": 0.5461452007293701, "step": 20},
    {"epoch": 0.23278370514064015, "grad_norm": 0.5442834252561063, "learning_rate": 7.435897435897437e-06, "loss": 0.3490773677825928, "step": 30},
    {"epoch": 0.31037827352085356, "grad_norm": 0.32322946422972365, "learning_rate": 1e-05, "loss": 0.2592954635620117, "step": 40},
    {"epoch": 0.3879728419010669, "grad_norm": 0.24901563193155196, "learning_rate": 9.979639600327522e-06, "loss": 0.2136533737182617, "step": 50},
    {"epoch": 0.4655674102812803, "grad_norm": 0.2047675084448879, "learning_rate": 9.918724219660013e-06, "loss": 0.18301695585250854, "step": 60},
    {"epoch": 0.5431619786614937, "grad_norm": 0.1694310997257767, "learning_rate": 9.817749962596115e-06, "loss": 0.16246029138565063, "step": 70},
    {"epoch": 0.6207565470417071, "grad_norm": 0.22587456656054467, "learning_rate": 9.677539179628005e-06, "loss": 0.14934264421463012, "step": 80},
    {"epoch": 0.6983511154219205, "grad_norm": 0.22154973989105028, "learning_rate": 9.499233769787534e-06, "loss": 0.134801185131073, "step": 90},
    {"epoch": 0.7759456838021338, "grad_norm": 0.2099862635469814, "learning_rate": 9.284285880837947e-06, "loss": 0.13017673492431642, "step": 100},
    {"epoch": 0.8535402521823472, "grad_norm": 0.32230657820182124, "learning_rate": 9.034446082750352e-06, "loss": 0.12214579582214355, "step": 110},
    {"epoch": 0.9311348205625606, "grad_norm": 0.324253054340729, "learning_rate": 8.751749110782013e-06, "loss": 0.12026152610778809, "step": 120},
    {"epoch": 1.0077594568380213, "grad_norm": 0.20488241588612174, "learning_rate": 8.438497294267117e-06, "loss": 0.11126101016998291, "step": 130},
    {"epoch": 1.0853540252182348, "grad_norm": 0.20661218086124847, "learning_rate": 8.097241806078616e-06, "loss": 0.10776399374008179, "step": 140},
    {"epoch": 1.162948593598448, "grad_norm": 0.25468202960165104, "learning_rate": 7.730761885468486e-06, "loss": 0.10431833267211914, "step": 150},
    {"epoch": 1.2405431619786615, "grad_norm": 0.17930064486716413, "learning_rate": 7.342042203498952e-06, "loss": 0.10304663181304932, "step": 160},
    {"epoch": 1.3181377303588748, "grad_norm": 0.20225538073749422, "learning_rate": 6.934248555404197e-06, "loss": 0.09784629344940185, "step": 170},
    {"epoch": 1.3957322987390883, "grad_norm": 0.2256721972453044, "learning_rate": 6.510702077847864e-06, "loss": 0.09537227749824524, "step": 180},
    {"epoch": 1.4733268671193016, "grad_norm": 0.21487787771920072, "learning_rate": 6.074852201055121e-06, "loss": 0.09520423412322998, "step": 190},
    {"epoch": 1.5509214354995149, "grad_norm": 0.17540761321861204, "learning_rate": 5.630248556101448e-06, "loss": 0.09088362455368042, "step": 200},
    {"epoch": 1.6285160038797284, "grad_norm": 0.21743503130668765, "learning_rate": 5.180512066149682e-06, "loss": 0.0899280071258545, "step": 210},
    {"epoch": 1.706110572259942, "grad_norm": 0.20331687416060285, "learning_rate": 4.729305457072913e-06, "loss": 0.0881616234779358, "step": 220},
    {"epoch": 1.7837051406401552, "grad_norm": 0.15781467110120098, "learning_rate": 4.280303427629404e-06, "loss": 0.08638249635696411, "step": 230},
    {"epoch": 1.8612997090203685, "grad_norm": 0.1623620489054104, "learning_rate": 3.8371627221284495e-06, "loss": 0.08716154098510742, "step": 240},
    {"epoch": 1.938894277400582, "grad_norm": 0.15611783173066054, "learning_rate": 3.403492349320101e-06, "loss": 0.08580605983734131, "step": 250},
    {"epoch": 2.0155189136760425, "grad_norm": 0.15287072067575233, "learning_rate": 2.982824190050958e-06, "loss": 0.08316840529441834, "step": 260},
    {"epoch": 2.093113482056256, "grad_norm": 0.1853136112632167, "learning_rate": 2.5785842330619038e-06, "loss": 0.08091338872909545, "step": 270},
    {"epoch": 2.1707080504364695, "grad_norm": 0.14114872525549504, "learning_rate": 2.1940646731880887e-06, "loss": 0.08085420131683349, "step": 280},
    {"epoch": 2.248302618816683, "grad_norm": 0.13643528182686213, "learning_rate": 1.8323970991978823e-06, "loss": 0.08156624436378479, "step": 290},
    {"epoch": 2.325897187196896, "grad_norm": 0.14573681730374075, "learning_rate": 1.4965269896332884e-06, "loss": 0.0808843195438385, "step": 300},
    {"epoch": 2.4034917555771096, "grad_norm": 0.1466398992341211, "learning_rate": 1.1891897243618184e-06, "loss": 0.07979943156242371, "step": 310},
    {"epoch": 2.481086323957323, "grad_norm": 0.12798260710398743, "learning_rate": 9.128883072055411e-07, "loss": 0.08049517869949341, "step": 320},
    {"epoch": 2.558680892337536, "grad_norm": 0.13826353734235647, "learning_rate": 6.698729810778065e-07, "loss": 0.08011389374732972, "step": 330},
    {"epoch": 2.6362754607177497, "grad_norm": 0.1305401343538733, "learning_rate": 4.6212290164521554e-07, "loss": 0.08163015246391296, "step": 340},
    {"epoch": 2.713870029097963, "grad_norm": 0.12804004522045906, "learning_rate": 2.9133001876746004e-07, "loss": 0.08051948547363282, "step": 350},
    {"epoch": 2.7914645974781767, "grad_norm": 0.12808224007612634, "learning_rate": 1.5888529698718347e-07, "loss": 0.07719261646270752, "step": 360},
    {"epoch": 2.86905916585839, "grad_norm": 0.12117673381149041, "learning_rate": 6.58673872923693e-08, "loss": 0.08128957152366638, "step": 370},
    {"epoch": 2.946653734238603, "grad_norm": 0.124324493318766, "learning_rate": 1.3033842410251074e-08, "loss": 0.07743191719055176, "step": 380},
    {"epoch": 3.0, "step": 387, "total_flos": 3081875480379392.0, "train_loss": 0.06056562058377327, "train_runtime": 29609.547, "train_samples_per_second": 6.685, "train_steps_per_second": 0.013}
  ],
  "logging_steps": 10,
  "max_steps": 387,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 40,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3081875480379392.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}
3  training_args.bin  Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:183e7b3bdde253f2571677062e7c65704a64af443b29948fe6b9d366ccf011e3
size 7377
BIN  training_loss.png  Normal file
Binary file not shown. (PNG image, 32 KiB)