初始化项目,由ModelHub XC社区提供模型

Model: alwaysgood/QWEN3-4B-CPT
Source: Original Platform
This commit is contained in:
ModelHub XC
2026-05-01 18:43:25 +08:00
commit 67c081bc2e
30 changed files with 39376 additions and 0 deletions

38
.gitattributes vendored Normal file
View File

@@ -0,0 +1,38 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
checkpoint-1477/tokenizer.json filter=lfs diff=lfs merge=lfs -text
tokenizer/tokenizer.json filter=lfs diff=lfs merge=lfs -text
tokenizer.json filter=lfs diff=lfs merge=lfs -text

59
README.md Normal file
View File

@@ -0,0 +1,59 @@
---
base_model: unsloth/Qwen3-4B-Base
library_name: transformers
model_name: checkpoints
tags:
- generated_from_trainer
- sft
- unsloth
- trl
license: license
---
# Model Card for checkpoints
This model is a fine-tuned version of [unsloth/Qwen3-4B-Base](https://huggingface.co/unsloth/Qwen3-4B-Base).
It has been trained using [TRL](https://github.com/huggingface/trl).
## Quick start
```python
from transformers import pipeline
question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
generator = pipeline("text-generation", model="alwaysgood/QWEN3-4B-CPT", device="cuda")
output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
print(output["generated_text"])
```
## Training procedure
[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/hiloong/mono-cpt/runs/sxp4zkdr)
This model was trained with SFT.
### Framework versions
- TRL: 0.24.0
- Transformers: 5.5.3
- Pytorch: 2.9.0+cu128
- Datasets: 4.3.0
- Tokenizers: 0.22.2
## Citations
Cite TRL as:
```bibtex
@misc{vonwerra2022trl,
title = {{TRL: Transformer Reinforcement Learning}},
author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallou{\'e}dec},
year = 2020,
journal = {GitHub repository},
publisher = {GitHub},
howpublished = {\url{https://github.com/huggingface/trl}}
}
```

12
all_results.json Normal file
View File

@@ -0,0 +1,12 @@
{
"epoch": 1.0,
"eval_loss": 1.7002116441726685,
"eval_runtime": 173.1669,
"eval_samples_per_second": 5.526,
"eval_steps_per_second": 0.693,
"total_flos": 2.103177196962902e+18,
"train_loss": 1.7256613558986822,
"train_runtime": 29239.084,
"train_samples_per_second": 1.616,
"train_steps_per_second": 0.051
}

View File

@@ -0,0 +1,74 @@
{
"architectures": [
"Qwen3ForCausalLM"
],
"attention_bias": false,
"attention_dropout": 0.0,
"bos_token_id": null,
"dtype": "bfloat16",
"eos_token_id": 151643,
"head_dim": 128,
"hidden_act": "silu",
"hidden_size": 2560,
"initializer_range": 0.02,
"intermediate_size": 9728,
"layer_types": [
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention"
],
"max_position_embeddings": 32768,
"max_window_layers": 36,
"model_name": "unsloth/Qwen3-4B-Base",
"model_type": "qwen3",
"num_attention_heads": 32,
"num_hidden_layers": 36,
"num_key_value_heads": 8,
"pad_token_id": 151669,
"rms_norm_eps": 1e-06,
"rope_parameters": {
"rope_theta": 1000000,
"rope_type": "default"
},
"sliding_window": null,
"tie_word_embeddings": true,
"transformers_version": "5.5.3",
"unsloth_fixed": true,
"unsloth_version": "2026.4.4",
"use_cache": false,
"use_sliding_window": false,
"vocab_size": 151936
}

View File

@@ -0,0 +1,9 @@
{
"eos_token_id": [
151643
],
"max_length": 32768,
"max_new_tokens": 2048,
"pad_token_id": 151669,
"transformers_version": "5.5.3"
}

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a3a4a2abfa757af075d0fa804b2093ad46c4d9bc4a227a070907a885eea69e97
size 8044982080

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fa38e6bbd0738ba63086bd0369ac5ef53d96f94c1d3bf63cd8286ec8d324f12e
size 14534393422

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a8e2011629d8bed3ef560fa11175cac55684c4e12a72634bb24abf767b6c7399
size 14645

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:41b1610c26267a8ba984a88363b2c05d3a4b232638c92e564e21a007ac9f4fc1
size 1465

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:45c4ffda6666cf6d75d0b1f961f25964e2a52a62e78aaecb2f458e9ba9824112
size 11422840

View File

@@ -0,0 +1,15 @@
{
"add_prefix_space": false,
"backend": "tokenizers",
"bos_token": null,
"clean_up_tokenization_spaces": false,
"eos_token": "<|endoftext|>",
"errors": "replace",
"is_local": false,
"model_max_length": 32768,
"pad_token": "<|PAD_TOKEN|>",
"padding_side": "right",
"split_special_tokens": false,
"tokenizer_class": "Qwen2Tokenizer",
"unk_token": null
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:021e20fabb8f12442e13effbcc63f0a47b25ed87f82c678b87ee5792f87ef9bc
size 5777

74
config.json Normal file
View File

@@ -0,0 +1,74 @@
{
"architectures": [
"Qwen3ForCausalLM"
],
"attention_bias": false,
"attention_dropout": 0.0,
"bos_token_id": null,
"dtype": "bfloat16",
"eos_token_id": 151643,
"head_dim": 128,
"hidden_act": "silu",
"hidden_size": 2560,
"initializer_range": 0.02,
"intermediate_size": 9728,
"layer_types": [
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention",
"full_attention"
],
"max_position_embeddings": 32768,
"max_window_layers": 36,
"model_name": "unsloth/Qwen3-4B-Base",
"model_type": "qwen3",
"num_attention_heads": 32,
"num_hidden_layers": 36,
"num_key_value_heads": 8,
"pad_token_id": 151669,
"rms_norm_eps": 1e-06,
"rope_parameters": {
"rope_theta": 1000000,
"rope_type": "default"
},
"sliding_window": null,
"tie_word_embeddings": true,
"transformers_version": "5.5.3",
"unsloth_fixed": true,
"unsloth_version": "2026.4.4",
"use_cache": false,
"use_sliding_window": false,
"vocab_size": 151936
}

15657
eval/eval_results_final.json Normal file

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load Diff

8
eval/summary.json Normal file
View File

@@ -0,0 +1,8 @@
[
{
"label": "final",
"ppl": 5.476611910214701,
"base_ppl": 6.23081716116593,
"path": "/home/unsloth/scp_stage1_cpt/artifacts/cpt_full_96gb_qwen3_4b/checkpoints"
}
]

7
eval_results.json Normal file
View File

@@ -0,0 +1,7 @@
{
"epoch": 1.0,
"eval_loss": 1.7002116441726685,
"eval_runtime": 173.1669,
"eval_samples_per_second": 5.526,
"eval_steps_per_second": 0.693
}

9
generation_config.json Normal file
View File

@@ -0,0 +1,9 @@
{
"eos_token_id": [
151643
],
"max_length": 32768,
"max_new_tokens": 2048,
"pad_token_id": 151669,
"transformers_version": "5.5.3"
}

3
model.safetensors Normal file
View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a3a4a2abfa757af075d0fa804b2093ad46c4d9bc4a227a070907a885eea69e97
size 8044982080

3
tokenizer.json Normal file
View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:45c4ffda6666cf6d75d0b1f961f25964e2a52a62e78aaecb2f458e9ba9824112
size 11422840

3
tokenizer/tokenizer.json Normal file
View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:45c4ffda6666cf6d75d0b1f961f25964e2a52a62e78aaecb2f458e9ba9824112
size 11422840

View File

@@ -0,0 +1,15 @@
{
"add_prefix_space": false,
"backend": "tokenizers",
"bos_token": null,
"clean_up_tokenization_spaces": false,
"eos_token": "<|endoftext|>",
"errors": "replace",
"is_local": false,
"model_max_length": 32768,
"pad_token": "<|PAD_TOKEN|>",
"padding_side": "left",
"split_special_tokens": false,
"tokenizer_class": "Qwen2Tokenizer",
"unk_token": null
}

15
tokenizer_config.json Normal file
View File

@@ -0,0 +1,15 @@
{
"add_prefix_space": false,
"backend": "tokenizers",
"bos_token": null,
"clean_up_tokenization_spaces": false,
"eos_token": "<|endoftext|>",
"errors": "replace",
"is_local": false,
"model_max_length": 32768,
"pad_token": "<|PAD_TOKEN|>",
"padding_side": "left",
"split_special_tokens": false,
"tokenizer_class": "Qwen2Tokenizer",
"unk_token": null
}

8
train_results.json Normal file
View File

@@ -0,0 +1,8 @@
{
"epoch": 1.0,
"total_flos": 2.103177196962902e+18,
"train_loss": 1.7256613558986822,
"train_runtime": 29239.084,
"train_samples_per_second": 1.616,
"train_steps_per_second": 0.051
}

1096
trainer_state.json Normal file

File diff suppressed because it is too large Load Diff

3
training_args.bin Normal file
View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:021e20fabb8f12442e13effbcc63f0a47b25ed87f82c678b87ee5792f87ef9bc
size 5777