commit c26b88e5c92f7537ee3ec0c19a5ee2031d9fa27a Author: ModelHub XC Date: Fri May 1 20:23:35 2026 +0800 初始化项目,由ModelHub XC社区提供模型 Model: pnsrc/lfm2.5-me-merged Source: Original Platform diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..52373fe --- /dev/null +++ b/.gitattributes @@ -0,0 +1,36 @@ +*.7z filter=lfs diff=lfs merge=lfs -text +*.arrow filter=lfs diff=lfs merge=lfs -text +*.bin filter=lfs diff=lfs merge=lfs -text +*.bz2 filter=lfs diff=lfs merge=lfs -text +*.ckpt filter=lfs diff=lfs merge=lfs -text +*.ftz filter=lfs diff=lfs merge=lfs -text +*.gz filter=lfs diff=lfs merge=lfs -text +*.h5 filter=lfs diff=lfs merge=lfs -text +*.joblib filter=lfs diff=lfs merge=lfs -text +*.lfs.* filter=lfs diff=lfs merge=lfs -text +*.mlmodel filter=lfs diff=lfs merge=lfs -text +*.model filter=lfs diff=lfs merge=lfs -text +*.msgpack filter=lfs diff=lfs merge=lfs -text +*.npy filter=lfs diff=lfs merge=lfs -text +*.npz filter=lfs diff=lfs merge=lfs -text +*.onnx filter=lfs diff=lfs merge=lfs -text +*.ot filter=lfs diff=lfs merge=lfs -text +*.parquet filter=lfs diff=lfs merge=lfs -text +*.pb filter=lfs diff=lfs merge=lfs -text +*.pickle filter=lfs diff=lfs merge=lfs -text +*.pkl filter=lfs diff=lfs merge=lfs -text +*.pt filter=lfs diff=lfs merge=lfs -text +*.pth filter=lfs diff=lfs merge=lfs -text +*.rar filter=lfs diff=lfs merge=lfs -text +*.safetensors filter=lfs diff=lfs merge=lfs -text +saved_model/**/* filter=lfs diff=lfs merge=lfs -text +*.tar.* filter=lfs diff=lfs merge=lfs -text +*.tar filter=lfs diff=lfs merge=lfs -text +*.tflite filter=lfs diff=lfs merge=lfs -text +*.tgz filter=lfs diff=lfs merge=lfs -text +*.wasm filter=lfs diff=lfs merge=lfs -text +*.xz filter=lfs diff=lfs merge=lfs -text +*.zip filter=lfs diff=lfs merge=lfs -text +*.zst filter=lfs diff=lfs merge=lfs -text +*tfevents* filter=lfs diff=lfs merge=lfs -text +tokenizer.json filter=lfs diff=lfs merge=lfs -text diff --git a/README.md b/README.md new file mode 100644 index 
0000000..45daddd --- /dev/null +++ b/README.md @@ -0,0 +1,21 @@ +--- +base_model: unsloth/gemma-3-1b-it-bnb-4bit +tags: +- text-generation-inference +- transformers +- unsloth +- gemma3_text +license: apache-2.0 +language: +- en +--- + +# Uploaded finetuned model + +- **Developed by:** pnsrc +- **License:** apache-2.0 +- **Finetuned from model :** unsloth/gemma-3-1b-it-bnb-4bit + +This gemma3_text model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. + +[<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth) diff --git a/chat_template.jinja b/chat_template.jinja new file mode 100644 index 0000000..0420fe7 --- /dev/null +++ b/chat_template.jinja @@ -0,0 +1,8 @@ +{{ bos_token }}{% if messages[0]['role'] == 'system' %}{{'<start_of_turn>user +' + messages[0]['content'] | trim + ' ' + messages[1]['content'] | trim + '<end_of_turn> +'}}{% set messages = messages[2:] %}{% endif %}{% for message in messages %}{% if message['role'] == 'user' %}{{'<start_of_turn>user +' + message['content'] | trim + '<end_of_turn> +'}}{% elif message['role'] == 'assistant' %}{{'<start_of_turn>model +' + message['content'] | trim + '<end_of_turn> +' }}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<start_of_turn>model +' }}{% endif %} \ No newline at end of file diff --git a/config.json b/config.json new file mode 100644 index 0000000..aed8a7b --- /dev/null +++ b/config.json @@ -0,0 +1,74 @@ +{ + "_sliding_window_pattern": 6, + "architectures": [ + "Gemma3ForCausalLM" + ], + "attention_bias": false, + "attention_dropout": 0.0, + "attn_logit_softcapping": null, + "bos_token_id": 2, + "cache_implementation": "hybrid", + "torch_dtype": "float16", + "eos_token_id": 106, + "final_logit_softcapping": null, + "head_dim": 256, + "hidden_activation": "gelu_pytorch_tanh", + "hidden_size": 1152, + "initializer_range": 0.02, + "intermediate_size": 6912, + "layer_types": [ + "sliding_attention", + "sliding_attention", + "sliding_attention", + "sliding_attention", + 
"sliding_attention", + "full_attention", + "sliding_attention", + "sliding_attention", + "sliding_attention", + "sliding_attention", + "sliding_attention", + "full_attention", + "sliding_attention", + "sliding_attention", + "sliding_attention", + "sliding_attention", + "sliding_attention", + "full_attention", + "sliding_attention", + "sliding_attention", + "sliding_attention", + "sliding_attention", + "sliding_attention", + "full_attention", + "sliding_attention", + "sliding_attention" + ], + "max_position_embeddings": 32768, + "model_name": "unsloth/gemma-3-1b-it-bnb-4bit", + "model_type": "gemma3_text", + "num_attention_heads": 4, + "num_hidden_layers": 26, + "num_key_value_heads": 1, + "pad_token_id": 0, + "query_pre_attn_scalar": 256, + "rms_norm_eps": 1e-06, + "rope_parameters": { + "full_attention": { + "rope_theta": 1000000, + "rope_type": "default" + }, + "sliding_attention": { + "rope_theta": 10000, + "rope_type": "default" + } + }, + "sliding_window": 512, + "sliding_window_pattern": 6, + "tie_word_embeddings": true, + "unsloth_fixed": true, + "unsloth_version": "2026.4.6", + "use_bidirectional_attention": false, + "use_cache": false, + "vocab_size": 262144 +} \ No newline at end of file diff --git a/model.safetensors b/model.safetensors new file mode 100644 index 0000000..097464b --- /dev/null +++ b/model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b36c2ad6a1f7c1148c19a1f212f3301ffd840166f53c80bd5fb61ff29263422c +size 1999811208 diff --git a/tokenizer.json b/tokenizer.json new file mode 100644 index 0000000..f74d183 --- /dev/null +++ b/tokenizer.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:daab2354f8a74e70d70b4d1f804939b68a8c9624dd06cb7858e52dd8970e9726 +size 33384567 diff --git a/tokenizer_config.json b/tokenizer_config.json new file mode 100644 index 0000000..7c7e695 --- /dev/null +++ b/tokenizer_config.json @@ -0,0 +1,11 @@ +{ + "backend": "tokenizers", + "bos_token": "<bos>", + 
"eos_token": "<end_of_turn>", + "mask_token": "<mask>", + "model_max_length": 1000000000000000019884624838656, + "pad_token": "<pad>", + "tokenizer_class": "GemmaTokenizer", + "unk_token": "<unk>", + "chat_template": "{{ bos_token }}{% if messages[0]['role'] == 'system' %}{{'<start_of_turn>user\n' + messages[0]['content'] | trim + ' ' + messages[1]['content'] | trim + '<end_of_turn>\n'}}{% set messages = messages[2:] %}{% endif %}{% for message in messages %}{% if message['role'] == 'user' %}{{'<start_of_turn>user\n' + message['content'] | trim + '<end_of_turn>\n'}}{% elif message['role'] == 'assistant' %}{{'<start_of_turn>model\n' + message['content'] | trim + '<end_of_turn>\n' }}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<start_of_turn>model\n' }}{% endif %}" +} \ No newline at end of file