初始化项目,由ModelHub XC社区提供模型

Model: Snehadev/smolified-eli10-ai-assistant
Source: Original Platform
This commit is contained in:
ModelHub XC
2026-05-06 21:57:47 +08:00
commit 62fd8f43b5
7 changed files with 247 additions and 0 deletions

36
.gitattributes vendored Normal file
View File

@@ -0,0 +1,36 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
tokenizer.json filter=lfs diff=lfs merge=lfs -text

69
README.md Normal file
View File

@@ -0,0 +1,69 @@
---
license: apache-2.0
language:
- en
tags:
- text-generation-inference
- transformers
- smolify
- dslm
pipeline_tag: text-generation
inference:
parameters:
temperature: 1
top_p: 0.95
top_k: 64
---
# 🤏 smolified-eli10-ai-assistant
> **Intelligence, Distilled.**
This is a **Domain Specific Language Model (DSLM)** generated by the **Smolify Foundry**.
It has been synthetically distilled from SOTA reasoning engines into a high-efficiency architecture, optimized for deployment on edge hardware (CPU/NPU) or low-VRAM environments.
## 📦 Asset Details
- **Origin:** Smolify Foundry (Job ID: `91ccdda8`)
- **Architecture:** gemma-3-270m
- **Training Method:** Proprietary Neural Distillation
- **Optimization:** 4-bit Quantized / FP16 Mixed
- **Dataset:** [Link to Dataset](https://huggingface.co/datasets/Snehadev/smolified-eli10-ai-assistant)
## 🚀 Usage (Inference)
This model is compatible with standard inference backends such as vLLM and Hugging Face Transformers.
```python
# Example: Running your Sovereign Model
from transformers import AutoModelForCausalLM, AutoTokenizer
model_id = "Snehadev/smolified-eli10-ai-assistant"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")
messages = [
{"role": "system", "content": '''Explain complex topics to a 10-year-old child using short sentences and simple words.'''},
{"role": "user", "content": '''What is Wi-Fi?'''}
]
text = tokenizer.apply_chat_template(
messages,
tokenize = False,
add_generation_prompt = True,
)
# Gemma chat templates prepend <bos>; strip it here because the tokenizer adds it again.
text = text.removeprefix('<bos>')
from transformers import TextStreamer
_ = model.generate(
**tokenizer(text, return_tensors = "pt").to(model.device),
max_new_tokens = 1000,
temperature = 1.0, top_p = 0.95, top_k = 64,
streamer = TextStreamer(tokenizer, skip_prompt = True),
)
```
## ⚖️ License & Ownership
These model weights are a sovereign asset owned by **Snehadev**.
Generated via [Smolify.ai](https://smolify.ai).
[<img src="https://smolify.ai/smolify.gif" width="100"/>](https://smolify.ai)

47
chat_template.jinja Normal file
View File

@@ -0,0 +1,47 @@
{{ bos_token }}
{%- if messages[0]['role'] == 'system' -%}
{%- if messages[0]['content'] is string -%}
{%- set first_user_prefix = messages[0]['content'] + '
' -%}
{%- else -%}
{%- set first_user_prefix = messages[0]['content'][0]['text'] + '
' -%}
{%- endif -%}
{%- set loop_messages = messages[1:] -%}
{%- else -%}
{%- set first_user_prefix = "" -%}
{%- set loop_messages = messages -%}
{%- endif -%}
{%- for message in loop_messages -%}
{%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}
{{ raise_exception("Conversation roles must alternate user/assistant/user/assistant/...") }}
{%- endif -%}
{%- if (message['role'] == 'assistant') -%}
{%- set role = "model" -%}
{%- else -%}
{%- set role = message['role'] -%}
{%- endif -%}
{{ '<start_of_turn>' + role + '
' + (first_user_prefix if loop.first else "") }}
{%- if message['content'] is string -%}
{{ message['content'] | trim }}
{%- elif message['content'] is iterable -%}
{%- for item in message['content'] -%}
{%- if item['type'] == 'image' -%}
{{ '<start_of_image>' }}
{%- elif item['type'] == 'text' -%}
{{ item['text'] | trim }}
{%- endif -%}
{%- endfor -%}
{%- else -%}
{{ raise_exception("Invalid content type") }}
{%- endif -%}
{{ '<end_of_turn>
' }}
{%- endfor -%}
{%- if add_generation_prompt -%}
{{ '<start_of_turn>model
' }}
{%- endif -%}

64
config.json Normal file
View File

@@ -0,0 +1,64 @@
{
"_sliding_window_pattern": 6,
"architectures": [
"Gemma3ForCausalLM"
],
"attention_bias": false,
"attention_dropout": 0.0,
"attn_logit_softcapping": null,
"bos_token_id": 2,
"torch_dtype": "bfloat16",
"eos_token_id": 106,
"final_logit_softcapping": null,
"head_dim": 256,
"hidden_activation": "gelu_pytorch_tanh",
"hidden_size": 640,
"initializer_range": 0.02,
"intermediate_size": 2048,
"layer_types": [
"sliding_attention",
"sliding_attention",
"sliding_attention",
"sliding_attention",
"sliding_attention",
"full_attention",
"sliding_attention",
"sliding_attention",
"sliding_attention",
"sliding_attention",
"sliding_attention",
"full_attention",
"sliding_attention",
"sliding_attention",
"sliding_attention",
"sliding_attention",
"sliding_attention",
"full_attention"
],
"max_position_embeddings": 32768,
"model_name": "unsloth/gemma-3-270m-it",
"model_type": "gemma3_text",
"num_attention_heads": 4,
"num_hidden_layers": 18,
"num_key_value_heads": 1,
"pad_token_id": 0,
"query_pre_attn_scalar": 256,
"rms_norm_eps": 1e-06,
"rope_parameters": {
"full_attention": {
"rope_theta": 1000000.0,
"rope_type": "default"
},
"sliding_attention": {
"rope_theta": 10000.0,
"rope_type": "default"
}
},
"sliding_window": 512,
"tie_word_embeddings": true,
"unsloth_fixed": true,
"unsloth_version": "2026.3.11",
"use_bidirectional_attention": false,
"use_cache": false,
"vocab_size": 262144
}

3
model.safetensors Normal file
View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a340f8f34196c8559d264ecb692622cb7f55c42e64b8add79125f9ba5768ef31
size 536223056

3
tokenizer.json Normal file
View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a74aefb1dc1340a25f29ab8370384b9ed24b2d921d7749ece7bbcfcfdf00d497
size 33384443

25
tokenizer_config.json Normal file
View File

@@ -0,0 +1,25 @@
{
"backend": "tokenizers",
"boi_token": "<start_of_image>",
"bos_token": "<bos>",
"clean_up_tokenization_spaces": false,
"eoi_token": "<end_of_image>",
"eos_token": "<end_of_turn>",
"image_token": "<image_soft_token>",
"is_local": false,
"mask_token": "<mask>",
"model_max_length": 32768,
"model_specific_special_tokens": {
"boi_token": "<start_of_image>",
"eoi_token": "<end_of_image>",
"image_token": "<image_soft_token>"
},
"pad_token": "<pad>",
"padding_side": "left",
"sp_model_kwargs": null,
"spaces_between_special_tokens": false,
"tokenizer_class": "GemmaTokenizer",
"unk_token": "<unk>",
"use_default_system_prompt": false,
"chat_template": "{{ bos_token }}\n{%- if messages[0]['role'] == 'system' -%}\n {%- if messages[0]['content'] is string -%}\n {%- set first_user_prefix = messages[0]['content'] + '\n\n' -%}\n {%- else -%}\n {%- set first_user_prefix = messages[0]['content'][0]['text'] + '\n\n' -%}\n {%- endif -%}\n {%- set loop_messages = messages[1:] -%}\n{%- else -%}\n {%- set first_user_prefix = \"\" -%}\n {%- set loop_messages = messages -%}\n{%- endif -%}\n{%- for message in loop_messages -%}\n {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}\n {{ raise_exception(\"Conversation roles must alternate user/assistant/user/assistant/...\") }}\n {%- endif -%}\n {%- if (message['role'] == 'assistant') -%}\n {%- set role = \"model\" -%}\n {%- else -%}\n {%- set role = message['role'] -%}\n {%- endif -%}\n {{ '<start_of_turn>' + role + '\n' + (first_user_prefix if loop.first else \"\") }}\n {%- if message['content'] is string -%}\n {{ message['content'] | trim }}\n {%- elif message['content'] is iterable -%}\n {%- for item in message['content'] -%}\n {%- if item['type'] == 'image' -%}\n {{ '<start_of_image>' }}\n {%- elif item['type'] == 'text' -%}\n {{ item['text'] | trim }}\n {%- endif -%}\n {%- endfor -%}\n {%- else -%}\n {{ raise_exception(\"Invalid content type\") }}\n {%- endif -%}\n {{ '<end_of_turn>\n' }}\n{%- endfor -%}\n{%- if add_generation_prompt -%}\n {{ '<start_of_turn>model\n' }}\n{%- endif -%}\n"
}