初始化项目,由ModelHub XC社区提供模型

Model: gaianet/SmolLM2-1.7B-Instruct-GGUF
Source: Original Platform
This commit is contained in:
ModelHub XC
2026-05-10 05:47:18 +08:00
commit b4ddfd43f5
16 changed files with 157 additions and 0 deletions

48
.gitattributes vendored Normal file
View File

@@ -0,0 +1,48 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
SmolLM2-1.7B-Instruct-Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
SmolLM2-1.7B-Instruct-Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
SmolLM2-1.7B-Instruct-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
SmolLM2-1.7B-Instruct-Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
SmolLM2-1.7B-Instruct-Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
SmolLM2-1.7B-Instruct-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
SmolLM2-1.7B-Instruct-Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
SmolLM2-1.7B-Instruct-Q5_0.gguf filter=lfs diff=lfs merge=lfs -text
SmolLM2-1.7B-Instruct-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
SmolLM2-1.7B-Instruct-Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
SmolLM2-1.7B-Instruct-Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
SmolLM2-1.7B-Instruct-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
SmolLM2-1.7B-Instruct-f16.gguf filter=lfs diff=lfs merge=lfs -text

35
README.md Normal file
View File

@@ -0,0 +1,35 @@
---
base_model: HuggingFaceTB/SmolLM2-1.7B-Instruct
license: apache-2.0
library_name: transformers
model_creator: HuggingFaceTB
model_name: SmolLM2-1.7B-Instruct
quantized_by: Second State Inc.
language:
- en
---
# SmolLM2-1.7B-Instruct-GGUF
## Original Model
[HuggingFaceTB/SmolLM2-1.7B-Instruct](https://huggingface.co/HuggingFaceTB/SmolLM2-1.7B-Instruct)
## Run with GaiaNet
**Prompt template:**
prompt template: `chatml`
**Context size:**
chat_ctx_size: `2048`
**Run with GaiaNet:**
- Quick start: https://docs.gaianet.ai/node-guide/quick-start
- Customize your node: https://docs.gaianet.ai/node-guide/customize
*Quantized with llama.cpp b4120*

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2a81276a962be228e83df40e1be833b5735a902974e635d4931d6bc5d76053e0
size 674583392

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:722e0f3846c0cd6613fc93580da4c1165de956d3626c55aed0b230a50194987f
size 932533088

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a2ce91737dfa2127c4fce6604b29218567f6343b16c560aab93efa025f958ecb
size 860181344

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5c34cbe3aeca752c09b39089a1c4fb521d7cb4699cc080cc6de9b03980c5b42d
size 776819552

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7d202c4c42d0bd299551cc099ac4fb2c12228b03f3bfa0d06cb00ab7f011d577
size 990729056

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fab954d296aa25885960ed771a005343c220ac93ef78a53454e667e334935fe9
size 1055609696

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3b73b7bdaf7106c7a21da146b8ac4fd732afecffe591fb06f43313cdd57a4259
size 999117664

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:23ecf83012ddcbb17c8b9227b2cf7f6a4abff4b1ebfe2fec5caf9d7318243551
size 1192055648

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:57b99e35d3538d90385687402ab5cbaba76ba52618071f72fc467689b1ddaffa
size 1225479008

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dfe968c323792f71eff38719450aee55263bf7964bb9fddf61e33e9f2e540147
size 1192055648

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5a5e500d36887cb68be1b24ab7adefb07ea77b7d2e0d1000095025b5f7bc8a6f
size 1405965152

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1812c04a6a47bf08da5d352d89978712db8225cb17d571c3c12805dd33411e6b
size 1820414816

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:84f3bea5ec6149793d9087205c433f11ac0de5602b942b59d5399773786a0b83
size 3424736096

35
config.json Normal file
View File

@@ -0,0 +1,35 @@
{
"architectures": [
"LlamaForCausalLM"
],
"attention_bias": false,
"attention_dropout": 0.0,
"bos_token_id": 1,
"eos_token_id": 2,
"hidden_act": "silu",
"hidden_size": 2048,
"initializer_range": 0.02,
"intermediate_size": 8192,
"max_position_embeddings": 8192,
"mlp_bias": false,
"model_type": "llama",
"num_attention_heads": 32,
"num_hidden_layers": 24,
"num_key_value_heads": 32,
"pad_token_id": 2,
"pretraining_tp": 1,
"rms_norm_eps": 1e-05,
"rope_scaling": null,
"rope_theta": 130000,
"tie_word_embeddings": true,
"torch_dtype": "bfloat16",
"transformers_version": "4.42.3",
"transformers.js_config": {
"kv_cache_dtype": {
"q4f16": "float16",
"fp16": "float16"
}
},
"use_cache": true,
"vocab_size": 49152
}