Initialize project; model provided by the ModelHub XC community

Model: prithivMLmods/Kapteyn-500M
Source: Original Platform
Author: ModelHub XC
Date: 2026-04-11 20:13:58 +08:00
Commit: 9c0a9f5ab6
23 changed files with 328 additions and 0 deletions

.gitattributes vendored Normal file

@@ -0,0 +1,47 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bin.* filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zstandard filter=lfs diff=lfs merge=lfs -text
*.tfevents* filter=lfs diff=lfs merge=lfs -text
*.db* filter=lfs diff=lfs merge=lfs -text
*.ark* filter=lfs diff=lfs merge=lfs -text
**/*ckpt*data* filter=lfs diff=lfs merge=lfs -text
**/*ckpt*.meta filter=lfs diff=lfs merge=lfs -text
**/*ckpt*.index filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.gguf* filter=lfs diff=lfs merge=lfs -text
*.ggml filter=lfs diff=lfs merge=lfs -text
*.llamafile* filter=lfs diff=lfs merge=lfs -text
*.pt2 filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text

Kapteyn-500M.BF16.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9f5d019cd559b0258abe6570833cbbe6967efe17a8070d44159171c36fa095c2
size 1028009536

Kapteyn-500M.F16.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0e5ab76b048cf3f72b3f1d74217159b8363554f7c2a48159374b719d186029f8
size 1028009536

Kapteyn-500M.F32.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:eb30a259a1e591a82a80457218e2b8d841ae4d7866f09e524f85e2e9e78bd982
size 2055089728

Kapteyn-500M.Q2_K.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:94f0b4e2234027a565af60142cd883b4780223a78767024faa1620520d2db68d
size 209601088

Kapteyn-500M.Q3_K_L.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:58c38c4a10aa63c18a6054f48cb5dd800c1b08f1004d0b95f7b8e161db6281d4
size 281341504

Kapteyn-500M.Q3_K_M.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5f3951aa14858311260adf78a808870ace96566cd793dcbbfc7a3433405e7f78
size 262762048

Kapteyn-500M.Q3_K_S.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cbecbca5419b0c6b3ebc7d1fa366c1b1fb18f2fb5b41fc2f04cd448b0a449b83
size 240791104

Kapteyn-500M.Q4_K_M.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7658ceeafb408e5ee7e7a18b0abcfb0a2a72ee7bcdccef1d399692f28a31a06e
size 317876800

Kapteyn-500M.Q4_K_S.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a898bd325da8f65e870040dc2bf80fdb2f541add1a5aebc2c009cc94a91491f5
size 304630336

Kapteyn-500M.Q5_K_M.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2bb67b4ef2e2af8d5c27883245cad0d5268703d7afcd22b09354caf1eaabeecc
size 368454208

Kapteyn-500M.Q5_K_S.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a3c0c0805b107dab24ba39bd52a38f6e3264f2a2cadbb2cdf45ba30590474775
size 360516160

Kapteyn-500M.Q6_K.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5e6045f51b3e78badc8bd183386560c2c036e32c04458c211a160717d1d76a83
size 422192704

Kapteyn-500M.Q8_0.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ed55c6e2af5df54926c65aa5db52aad95df25f271b2243cd391a762d8b332df8
size 546565696
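
The quantizations above span roughly 210 MB (Q2_K) to 2.1 GB (F32). Below is a minimal sketch of running one of them locally, assuming the llama-cpp-python bindings (an assumption; any GGUF-compatible runtime works). The prompt is hand-formatted in the `<|prompt|>`/`<|answer|>` style defined by chat_template.jinja later in this commit:

```python
from llama_cpp import Llama

# Load a mid-size quantization; n_ctx matches the model's 8192-token
# max_position_embeddings from config.json.
llm = Llama(model_path="Kapteyn-500M.Q4_K_M.gguf", n_ctx=8192)

# Single user turn, formatted the way the bundled chat template renders it.
out = llm(
    "<|prompt|>Hello! How are you doing today?</s><|answer|>",
    max_tokens=128,
    stop=["</s>"],
)
print(out["choices"][0]["text"])
```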

README.md Normal file

@@ -0,0 +1,104 @@
---
license: apache-2.0
language:
- en
pipeline_tag: text-generation
library_name: transformers
tags:
- text-generation-inference
---
![4.png](https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/EaSsXHEv3KS2hMQWJ7mEf.png)
# **Kapteyn-500M**
> **Kapteyn-500M** is a lightweight, general-purpose micro language model built on the **LlamaForCausalLM** architecture and derived from the **Llama2** family of models. This compact 500M-parameter model is designed for **simple chats and responses**, making it well suited to conversational AI applications where efficiency and fast response times matter more than complex reasoning.
---
## **Key Features**
1. **Compact & Efficient Architecture**
Built on the proven **LlamaForCausalLM architecture** with only 500M parameters, ensuring fast inference and low memory footprint for resource-constrained environments.
2. **General-Purpose Conversational AI**
Optimized for natural dialogue, casual conversations, and simple Q&A tasks—perfect for chatbots, virtual assistants, and interactive applications.
3. **Llama2-Based Training**
Builds on the robust foundation of the **Llama2** family of models, inheriting their conversational capabilities while keeping deployment requirements ultra-lightweight.
4. **Fast Response Generation**
Designed for quick inference with minimal latency, making it suitable for real-time chat applications and interactive user experiences.
5. **Versatile Deployment Options**
Runs efficiently on **CPUs**, **entry-level GPUs**, **mobile devices**, and **edge computing platforms** with minimal resource requirements.
6. **Simple Integration**
Easy to integrate into existing applications with standard transformer interfaces and minimal setup requirements.
---
## **Quickstart with Transformers**
```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "prithivMLmods/Kapteyn-500M"

model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# The bundled chat_template.jinja raises an exception for system messages,
# so the conversation uses user/assistant turns only.
prompt = "Hello! How are you doing today?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=256,
    do_sample=True,
    temperature=0.7,
    top_p=0.9
)
# Keep only the newly generated tokens, dropping the echoed prompt.
generated_ids = [
    output_ids[len(input_ids):]
    for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
print(response)
```
---
## **Intended Use**
* Casual conversation and general chat applications
* Simple Q&A systems and customer service bots
* Educational tools requiring basic conversational interaction
* Mobile and edge AI applications with limited computational resources
* Prototyping conversational AI features before scaling to larger models
* Personal assistants for everyday tasks and simple information retrieval
---
## **Limitations**
* Limited complex reasoning and analytical capabilities compared to larger models
* Not suitable for specialized technical, scientific, or mathematical tasks
* The 8K-token context window can constrain longer conversations
* May struggle with nuanced or highly specialized domain knowledge
* Optimized for simple responses rather than detailed explanations or complex problem-solving

chat_template.jinja Normal file

@@ -0,0 +1 @@
{% for message in messages %}{% if message['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}{% if ((message['role'] == 'user') != (loop.index0 % 2 == 0)) or ((message['role'] == 'assistant') != (loop.index0 % 2 == 1)) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '<|prompt|>' + message['content'].strip() + eos_token }}{% elif message['role'] == 'assistant' %}{{ '<|answer|>' + message['content'].strip() + eos_token }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|answer|>' }}{% endif %}
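
The template rejects system messages and alternates `<|prompt|>`/`<|answer|>` turns, each terminated by the eos token (`</s>` per special_tokens_map.json below). A minimal sketch of what it renders, assuming a transformers version recent enough to pick up chat_template.jinja automatically:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("prithivMLmods/Kapteyn-500M")

# Render a two-turn exchange without tokenizing, to inspect the raw format.
text = tok.apply_chat_template(
    [{"role": "user", "content": "Hi!"},
     {"role": "assistant", "content": "Hello!"}],
    tokenize=False,
    add_generation_prompt=False,
)
print(text)  # <|prompt|>Hi!</s><|answer|>Hello!</s>
```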

config.json Normal file

@@ -0,0 +1,31 @@
{
  "architectures": [
    "LlamaForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "bos_token_id": 1,
  "eos_token_id": 2,
  "head_dim": 96,
  "hidden_act": "silu",
  "hidden_size": 1536,
  "initializer_range": 0.02,
  "intermediate_size": 4096,
  "max_position_embeddings": 8192,
  "mlp_bias": false,
  "model_type": "llama",
  "num_attention_heads": 16,
  "num_hidden_layers": 16,
  "num_key_value_heads": 8,
  "pad_token_id": 0,
  "pretraining_tp": 1,
  "rms_norm_eps": 1e-05,
  "rope_scaling": null,
  "rope_theta": 100000,
  "sliding_window": null,
  "tie_word_embeddings": false,
  "torch_dtype": "float16",
  "transformers_version": "4.52.4",
  "use_cache": true,
  "vocab_size": 32000
}
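
A quick consistency check on this config: `head_dim` (96) × `num_attention_heads` (16) equals `hidden_size` (1536), and the implied parameter count comes out near 513M, matching the ~1.03 GB float16 `model.safetensors` below. A back-of-the-envelope sketch (RMSNorm weights omitted):

```python
# Rough parameter count implied by config.json above.
V, H, I, L = 32000, 1536, 4096, 16      # vocab, hidden, intermediate, layers
n_heads, n_kv, d_head = 16, 8, 96

embed = 2 * V * H                        # input embeddings + untied lm_head
attn = 2 * H * (n_heads * d_head) + 2 * H * (n_kv * d_head)  # q/o and k/v, no bias
mlp = 3 * H * I                          # gate, up, down projections
total = embed + L * (attn + mlp)
print(f"{total / 1e6:.1f}M parameters")  # ~513.5M, ~1.03 GB at 2 bytes/param
```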

configuration.json Normal file

@@ -0,0 +1 @@
{"framework": "pytorch", "task": "text-generation", "allow_remote": true}

generation_config.json Normal file

@@ -0,0 +1,8 @@
{
  "_from_model_config": true,
  "bos_token_id": 1,
  "eos_token_id": 2,
  "max_length": 8192,
  "pad_token_id": 0,
  "transformers_version": "4.52.4"
}

model.safetensors Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bc1a458f887101320d7f3d129b085d631fa41a4c1fbd5ee921cb001e0f85ca5a
size 1027198144

special_tokens_map.json Normal file

@@ -0,0 +1,44 @@
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "cls_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "sep_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}

tokenizer.model (Stored with Git LFS) Normal file

Binary file not shown.

tokenizer_config.json Normal file

@@ -0,0 +1,47 @@
{
  "add_bos_token": false,
  "add_eos_token": false,
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "0": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "additional_special_tokens": [],
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": false,
  "cls_token": "</s>",
  "eos_token": "</s>",
  "extra_special_tokens": {},
  "legacy": false,
  "model_max_length": 8192,
  "pad_token": "<unk>",
  "padding_side": "right",
  "sep_token": "</s>",
  "sp_model_kwargs": {},
  "spaces_between_special_tokens": false,
  "tokenizer_class": "LlamaTokenizer",
  "unk_token": "<unk>",
  "use_default_system_prompt": false
}