初始化项目,由ModelHub XC社区提供模型

Model: Shahriar12354/testfahim
Source: Original Platform
This commit is contained in:
ModelHub XC
2026-04-23 15:26:06 +08:00
commit ee8635b43a
20 changed files with 728 additions and 0 deletions

37
.gitattributes vendored Normal file
View File

@@ -0,0 +1,37 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
checkpoint-284/tokenizer.json filter=lfs diff=lfs merge=lfs -text
tokenizer.json filter=lfs diff=lfs merge=lfs -text

58
README.md Normal file
View File

@@ -0,0 +1,58 @@
---
base_model: google/gemma-3-270m-it
library_name: transformers
model_name: output
tags:
- generated_from_trainer
- sft
- trl
license: license
---
# Model Card for output
This model is a fine-tuned version of [google/gemma-3-270m-it](https://huggingface.co/google/gemma-3-270m-it).
It has been trained using [TRL](https://github.com/huggingface/trl).
## Quick start
```python
from transformers import pipeline
question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
generator = pipeline("text-generation", model="Shahriar12354/testfahim", device="cuda")
output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
print(output["generated_text"])
```
## Training procedure
This model was trained with SFT.
### Framework versions
- TRL: 1.1.0
- Transformers: 5.0.0
- Pytorch: 2.10.0+cu128
- Datasets: 4.8.4
- Tokenizers: 0.22.2
## Citations
Cite TRL as:
```bibtex
@software{vonwerra2020trl,
title = {{TRL: Transformers Reinforcement Learning}},
author = {von Werra, Leandro and Belkada, Younes and Tunstall, Lewis and Beeching, Edward and Thrush, Tristan and Lambert, Nathan and Huang, Shengyi and Rasul, Kashif and Gallouédec, Quentin},
license = {Apache-2.0},
url = {https://github.com/huggingface/trl},
year = {2020}
}
```

47
chat_template.jinja Normal file
View File

@@ -0,0 +1,47 @@
{{ bos_token }}
{%- if messages[0]['role'] == 'system' -%}
{%- if messages[0]['content'] is string -%}
{%- set first_user_prefix = messages[0]['content'] + '
' -%}
{%- else -%}
{%- set first_user_prefix = messages[0]['content'][0]['text'] + '
' -%}
{%- endif -%}
{%- set loop_messages = messages[1:] -%}
{%- else -%}
{%- set first_user_prefix = "" -%}
{%- set loop_messages = messages -%}
{%- endif -%}
{%- for message in loop_messages -%}
{%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}
{{ raise_exception("Conversation roles must alternate user/assistant/user/assistant/...") }}
{%- endif -%}
{%- if (message['role'] == 'assistant') -%}
{%- set role = "model" -%}
{%- else -%}
{%- set role = message['role'] -%}
{%- endif -%}
{{ '<start_of_turn>' + role + '
' + (first_user_prefix if loop.first else "") }}
{%- if message['content'] is string -%}
{{ message['content'] | trim }}
{%- elif message['content'] is iterable -%}
{%- for item in message['content'] -%}
{%- if item['type'] == 'image' -%}
{{ '<start_of_image>' }}
{%- elif item['type'] == 'text' -%}
{{ item['text'] | trim }}
{%- endif -%}
{%- endfor -%}
{%- else -%}
{{ raise_exception("Invalid content type") }}
{%- endif -%}
{{ '<end_of_turn>
' }}
{%- endfor -%}
{%- if add_generation_prompt -%}
{{'<start_of_turn>model
'}}
{%- endif -%}

View File

@@ -0,0 +1,47 @@
{{ bos_token }}
{%- if messages[0]['role'] == 'system' -%}
{%- if messages[0]['content'] is string -%}
{%- set first_user_prefix = messages[0]['content'] + '
' -%}
{%- else -%}
{%- set first_user_prefix = messages[0]['content'][0]['text'] + '
' -%}
{%- endif -%}
{%- set loop_messages = messages[1:] -%}
{%- else -%}
{%- set first_user_prefix = "" -%}
{%- set loop_messages = messages -%}
{%- endif -%}
{%- for message in loop_messages -%}
{%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}
{{ raise_exception("Conversation roles must alternate user/assistant/user/assistant/...") }}
{%- endif -%}
{%- if (message['role'] == 'assistant') -%}
{%- set role = "model" -%}
{%- else -%}
{%- set role = message['role'] -%}
{%- endif -%}
{{ '<start_of_turn>' + role + '
' + (first_user_prefix if loop.first else "") }}
{%- if message['content'] is string -%}
{{ message['content'] | trim }}
{%- elif message['content'] is iterable -%}
{%- for item in message['content'] -%}
{%- if item['type'] == 'image' -%}
{{ '<start_of_image>' }}
{%- elif item['type'] == 'text' -%}
{{ item['text'] | trim }}
{%- endif -%}
{%- endfor -%}
{%- else -%}
{{ raise_exception("Invalid content type") }}
{%- endif -%}
{{ '<end_of_turn>
' }}
{%- endfor -%}
{%- if add_generation_prompt -%}
{{'<start_of_turn>model
'}}
{%- endif -%}

View File

@@ -0,0 +1,62 @@
{
"_sliding_window_pattern": 6,
"architectures": [
"Gemma3ForCausalLM"
],
"attention_bias": false,
"attention_dropout": 0.0,
"attn_logit_softcapping": null,
"bos_token_id": 2,
"dtype": "bfloat16",
"eos_token_id": 1,
"final_logit_softcapping": null,
"head_dim": 256,
"hidden_activation": "gelu_pytorch_tanh",
"hidden_size": 640,
"initializer_range": 0.02,
"intermediate_size": 2048,
"layer_types": [
"sliding_attention",
"sliding_attention",
"sliding_attention",
"sliding_attention",
"sliding_attention",
"full_attention",
"sliding_attention",
"sliding_attention",
"sliding_attention",
"sliding_attention",
"sliding_attention",
"full_attention",
"sliding_attention",
"sliding_attention",
"sliding_attention",
"sliding_attention",
"sliding_attention",
"full_attention"
],
"max_position_embeddings": 32768,
"model_type": "gemma3_text",
"num_attention_heads": 4,
"num_hidden_layers": 18,
"num_key_value_heads": 1,
"pad_token_id": 0,
"query_pre_attn_scalar": 256,
"rms_norm_eps": 1e-06,
"rope_parameters": {
"full_attention": {
"rope_theta": 1000000.0,
"rope_type": "default"
},
"sliding_attention": {
"rope_theta": 10000.0,
"rope_type": "default"
}
},
"sliding_window": 512,
"tie_word_embeddings": true,
"transformers_version": "5.0.0",
"use_bidirectional_attention": false,
"use_cache": false,
"vocab_size": 262144
}

View File

@@ -0,0 +1,13 @@
{
"bos_token_id": 2,
"cache_implementation": "hybrid",
"do_sample": true,
"eos_token_id": [
1,
106
],
"pad_token_id": 0,
"top_k": 64,
"top_p": 0.95,
"transformers_version": "5.0.0"
}

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b18932534b14fbdeaba2cb8dd33a52e8a8c76ba207c75cc5c8644dcc1cea6078
size 536223056

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a30cfd239dd45f9c0b5f150ca6524c38c603c52a5fe9860fe385cd0fddcce1f9
size 1546518667

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:61c19bab1174704a4a4441475683bf1270277af15d2e2c95e964789128e482c4
size 14645

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f0f38c137a365ec6f9e00fa0cd4932d42ec0e169efba559b60c69496c62fedda
size 1465

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a74aefb1dc1340a25f29ab8370384b9ed24b2d921d7749ece7bbcfcfdf00d497
size 33384443

View File

@@ -0,0 +1,24 @@
{
"backend": "tokenizers",
"boi_token": "<start_of_image>",
"bos_token": "<bos>",
"clean_up_tokenization_spaces": false,
"eoi_token": "<end_of_image>",
"eos_token": "<eos>",
"image_token": "<image_soft_token>",
"is_local": false,
"mask_token": "<mask>",
"model_max_length": 1000000000000000019884624838656,
"model_specific_special_tokens": {
"boi_token": "<start_of_image>",
"eoi_token": "<end_of_image>",
"image_token": "<image_soft_token>"
},
"pad_token": "<pad>",
"padding_side": "left",
"sp_model_kwargs": null,
"spaces_between_special_tokens": false,
"tokenizer_class": "GemmaTokenizer",
"unk_token": "<unk>",
"use_default_system_prompt": false
}

View File

@@ -0,0 +1,314 @@
{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 284,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"entropy": 2.0583984375,
"epoch": 0.035211267605633804,
"grad_norm": 37.75,
"learning_rate": 1.9366197183098595e-05,
"loss": 1.7701053619384766,
"mean_token_accuracy": 0.6944564647972584,
"num_tokens": 7858.0,
"step": 10
},
{
"entropy": 1.5865234375,
"epoch": 0.07042253521126761,
"grad_norm": 31.75,
"learning_rate": 1.8661971830985917e-05,
"loss": 0.42496376037597655,
"mean_token_accuracy": 0.9247574791312217,
"num_tokens": 13594.0,
"step": 20
},
{
"entropy": 1.559375,
"epoch": 0.1056338028169014,
"grad_norm": 24.625,
"learning_rate": 1.7957746478873243e-05,
"loss": 0.3954581022262573,
"mean_token_accuracy": 0.9335559457540512,
"num_tokens": 19434.0,
"step": 30
},
{
"entropy": 1.4927734375,
"epoch": 0.14084507042253522,
"grad_norm": 27.0,
"learning_rate": 1.7253521126760565e-05,
"loss": 0.3182990312576294,
"mean_token_accuracy": 0.9380857452750206,
"num_tokens": 25149.0,
"step": 40
},
{
"entropy": 1.62119140625,
"epoch": 0.176056338028169,
"grad_norm": 26.875,
"learning_rate": 1.6549295774647887e-05,
"loss": 0.3523742437362671,
"mean_token_accuracy": 0.921422353386879,
"num_tokens": 31575.0,
"step": 50
},
{
"entropy": 1.616943359375,
"epoch": 0.2112676056338028,
"grad_norm": 36.25,
"learning_rate": 1.5845070422535213e-05,
"loss": 0.26896488666534424,
"mean_token_accuracy": 0.9397121250629425,
"num_tokens": 36872.0,
"step": 60
},
{
"entropy": 1.4833984375,
"epoch": 0.24647887323943662,
"grad_norm": 44.5,
"learning_rate": 1.5140845070422537e-05,
"loss": 0.3303802251815796,
"mean_token_accuracy": 0.9231527343392372,
"num_tokens": 43003.0,
"step": 70
},
{
"entropy": 1.506201171875,
"epoch": 0.28169014084507044,
"grad_norm": 38.75,
"learning_rate": 1.443661971830986e-05,
"loss": 0.36821565628051756,
"mean_token_accuracy": 0.9252683132886886,
"num_tokens": 48605.0,
"step": 80
},
{
"entropy": 1.64375,
"epoch": 0.31690140845070425,
"grad_norm": 33.5,
"learning_rate": 1.3732394366197184e-05,
"loss": 0.28042542934417725,
"mean_token_accuracy": 0.9455952301621438,
"num_tokens": 54883.0,
"step": 90
},
{
"entropy": 1.44912109375,
"epoch": 0.352112676056338,
"grad_norm": 35.75,
"learning_rate": 1.3028169014084508e-05,
"loss": 0.2935742616653442,
"mean_token_accuracy": 0.935359711945057,
"num_tokens": 61415.0,
"step": 100
},
{
"entropy": 1.38779296875,
"epoch": 0.3873239436619718,
"grad_norm": 21.625,
"learning_rate": 1.232394366197183e-05,
"loss": 0.28359940052032473,
"mean_token_accuracy": 0.942648071050644,
"num_tokens": 67390.0,
"step": 110
},
{
"entropy": 1.53544921875,
"epoch": 0.4225352112676056,
"grad_norm": 34.0,
"learning_rate": 1.1619718309859156e-05,
"loss": 0.2355732202529907,
"mean_token_accuracy": 0.9371909454464913,
"num_tokens": 72616.0,
"step": 120
},
{
"entropy": 1.603271484375,
"epoch": 0.45774647887323944,
"grad_norm": 26.75,
"learning_rate": 1.0915492957746481e-05,
"loss": 0.28939812183380126,
"mean_token_accuracy": 0.942104908823967,
"num_tokens": 78245.0,
"step": 130
},
{
"entropy": 1.43603515625,
"epoch": 0.49295774647887325,
"grad_norm": 18.625,
"learning_rate": 1.0211267605633803e-05,
"loss": 0.31623306274414065,
"mean_token_accuracy": 0.9423870608210564,
"num_tokens": 84939.0,
"step": 140
},
{
"entropy": 1.391650390625,
"epoch": 0.528169014084507,
"grad_norm": 25.875,
"learning_rate": 9.507042253521127e-06,
"loss": 0.24384639263153077,
"mean_token_accuracy": 0.9436959236860275,
"num_tokens": 90697.0,
"step": 150
},
{
"entropy": 1.46484375,
"epoch": 0.5633802816901409,
"grad_norm": 36.25,
"learning_rate": 8.802816901408451e-06,
"loss": 0.273523736000061,
"mean_token_accuracy": 0.9429585993289947,
"num_tokens": 98163.0,
"step": 160
},
{
"entropy": 1.57861328125,
"epoch": 0.5985915492957746,
"grad_norm": 23.5,
"learning_rate": 8.098591549295775e-06,
"loss": 0.2875859260559082,
"mean_token_accuracy": 0.9451341822743415,
"num_tokens": 104232.0,
"step": 170
},
{
"entropy": 1.43037109375,
"epoch": 0.6338028169014085,
"grad_norm": 24.25,
"learning_rate": 7.3943661971831e-06,
"loss": 0.22526137828826903,
"mean_token_accuracy": 0.9523040130734444,
"num_tokens": 109993.0,
"step": 180
},
{
"entropy": 1.50576171875,
"epoch": 0.6690140845070423,
"grad_norm": 13.9375,
"learning_rate": 6.690140845070423e-06,
"loss": 0.25973803997039796,
"mean_token_accuracy": 0.9431809738278389,
"num_tokens": 116723.0,
"step": 190
},
{
"entropy": 1.63857421875,
"epoch": 0.704225352112676,
"grad_norm": 22.0,
"learning_rate": 5.9859154929577465e-06,
"loss": 0.20339107513427734,
"mean_token_accuracy": 0.9520902916789055,
"num_tokens": 123876.0,
"step": 200
},
{
"entropy": 1.429931640625,
"epoch": 0.7394366197183099,
"grad_norm": 34.75,
"learning_rate": 5.28169014084507e-06,
"loss": 0.2633697032928467,
"mean_token_accuracy": 0.9487333759665489,
"num_tokens": 130683.0,
"step": 210
},
{
"entropy": 1.46591796875,
"epoch": 0.7746478873239436,
"grad_norm": 33.0,
"learning_rate": 4.577464788732395e-06,
"loss": 0.277258563041687,
"mean_token_accuracy": 0.9407993748784065,
"num_tokens": 136650.0,
"step": 220
},
{
"entropy": 1.404931640625,
"epoch": 0.8098591549295775,
"grad_norm": 19.75,
"learning_rate": 3.873239436619718e-06,
"loss": 0.20749433040618898,
"mean_token_accuracy": 0.9592674881219864,
"num_tokens": 142057.0,
"step": 230
},
{
"entropy": 1.37626953125,
"epoch": 0.8450704225352113,
"grad_norm": 12.0,
"learning_rate": 3.1690140845070427e-06,
"loss": 0.2674617528915405,
"mean_token_accuracy": 0.9528747946023941,
"num_tokens": 146942.0,
"step": 240
},
{
"entropy": 1.563525390625,
"epoch": 0.8802816901408451,
"grad_norm": 33.25,
"learning_rate": 2.4647887323943666e-06,
"loss": 0.2434919595718384,
"mean_token_accuracy": 0.9523143276572228,
"num_tokens": 153926.0,
"step": 250
},
{
"entropy": 1.628564453125,
"epoch": 0.9154929577464789,
"grad_norm": 16.375,
"learning_rate": 1.7605633802816902e-06,
"loss": 0.1847941756248474,
"mean_token_accuracy": 0.9619321092963219,
"num_tokens": 160139.0,
"step": 260
},
{
"entropy": 1.414111328125,
"epoch": 0.9507042253521126,
"grad_norm": 0.61328125,
"learning_rate": 1.0563380281690142e-06,
"loss": 0.14498558044433593,
"mean_token_accuracy": 0.9702936589717865,
"num_tokens": 164508.0,
"step": 270
},
{
"entropy": 1.513232421875,
"epoch": 0.9859154929577465,
"grad_norm": 13.5625,
"learning_rate": 3.521126760563381e-07,
"loss": 0.23086421489715575,
"mean_token_accuracy": 0.9467697784304618,
"num_tokens": 170291.0,
"step": 280
}
],
"logging_steps": 10,
"max_steps": 284,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 103970458857216.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:938f66a5a8da1319ebf777e3c97b055f4c688463db8c2616b0a794f762a642ee
size 5585

62
config.json Normal file
View File

@@ -0,0 +1,62 @@
{
"_sliding_window_pattern": 6,
"architectures": [
"Gemma3ForCausalLM"
],
"attention_bias": false,
"attention_dropout": 0.0,
"attn_logit_softcapping": null,
"bos_token_id": 2,
"dtype": "bfloat16",
"eos_token_id": 1,
"final_logit_softcapping": null,
"head_dim": 256,
"hidden_activation": "gelu_pytorch_tanh",
"hidden_size": 640,
"initializer_range": 0.02,
"intermediate_size": 2048,
"layer_types": [
"sliding_attention",
"sliding_attention",
"sliding_attention",
"sliding_attention",
"sliding_attention",
"full_attention",
"sliding_attention",
"sliding_attention",
"sliding_attention",
"sliding_attention",
"sliding_attention",
"full_attention",
"sliding_attention",
"sliding_attention",
"sliding_attention",
"sliding_attention",
"sliding_attention",
"full_attention"
],
"max_position_embeddings": 32768,
"model_type": "gemma3_text",
"num_attention_heads": 4,
"num_hidden_layers": 18,
"num_key_value_heads": 1,
"pad_token_id": 0,
"query_pre_attn_scalar": 256,
"rms_norm_eps": 1e-06,
"rope_parameters": {
"full_attention": {
"rope_theta": 1000000.0,
"rope_type": "default"
},
"sliding_attention": {
"rope_theta": 10000.0,
"rope_type": "default"
}
},
"sliding_window": 512,
"tie_word_embeddings": true,
"transformers_version": "5.0.0",
"use_bidirectional_attention": false,
"use_cache": false,
"vocab_size": 262144
}

13
generation_config.json Normal file
View File

@@ -0,0 +1,13 @@
{
"bos_token_id": 2,
"cache_implementation": "hybrid",
"do_sample": true,
"eos_token_id": [
1,
106
],
"pad_token_id": 0,
"top_k": 64,
"top_p": 0.95,
"transformers_version": "5.0.0"
}

3
model.safetensors Normal file
View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b18932534b14fbdeaba2cb8dd33a52e8a8c76ba207c75cc5c8644dcc1cea6078
size 536223056

3
tokenizer.json Normal file
View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a74aefb1dc1340a25f29ab8370384b9ed24b2d921d7749ece7bbcfcfdf00d497
size 33384443

24
tokenizer_config.json Normal file
View File

@@ -0,0 +1,24 @@
{
"backend": "tokenizers",
"boi_token": "<start_of_image>",
"bos_token": "<bos>",
"clean_up_tokenization_spaces": false,
"eoi_token": "<end_of_image>",
"eos_token": "<eos>",
"image_token": "<image_soft_token>",
"is_local": false,
"mask_token": "<mask>",
"model_max_length": 1000000000000000019884624838656,
"model_specific_special_tokens": {
"boi_token": "<start_of_image>",
"eoi_token": "<end_of_image>",
"image_token": "<image_soft_token>"
},
"pad_token": "<pad>",
"padding_side": "left",
"sp_model_kwargs": null,
"spaces_between_special_tokens": false,
"tokenizer_class": "GemmaTokenizer",
"unk_token": "<unk>",
"use_default_system_prompt": false
}

3
training_args.bin Normal file
View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:938f66a5a8da1319ebf777e3c97b055f4c688463db8c2616b0a794f762a642ee
size 5585