commit 799fb5f91a3a5dc3a1e1c2adf3c4cf215790c9df Author: ModelHub XC Date: Fri Apr 24 23:08:33 2026 +0800 初始化项目,由ModelHub XC社区提供模型 Model: N-Bot-Int/MistThena7BV2-GGUF Source: Original Platform diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..4b8a3c4 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,38 @@ +*.7z filter=lfs diff=lfs merge=lfs -text +*.arrow filter=lfs diff=lfs merge=lfs -text +*.bin filter=lfs diff=lfs merge=lfs -text +*.bz2 filter=lfs diff=lfs merge=lfs -text +*.ckpt filter=lfs diff=lfs merge=lfs -text +*.ftz filter=lfs diff=lfs merge=lfs -text +*.gz filter=lfs diff=lfs merge=lfs -text +*.h5 filter=lfs diff=lfs merge=lfs -text +*.joblib filter=lfs diff=lfs merge=lfs -text +*.lfs.* filter=lfs diff=lfs merge=lfs -text +*.mlmodel filter=lfs diff=lfs merge=lfs -text +*.model filter=lfs diff=lfs merge=lfs -text +*.msgpack filter=lfs diff=lfs merge=lfs -text +*.npy filter=lfs diff=lfs merge=lfs -text +*.npz filter=lfs diff=lfs merge=lfs -text +*.onnx filter=lfs diff=lfs merge=lfs -text +*.ot filter=lfs diff=lfs merge=lfs -text +*.parquet filter=lfs diff=lfs merge=lfs -text +*.pb filter=lfs diff=lfs merge=lfs -text +*.pickle filter=lfs diff=lfs merge=lfs -text +*.pkl filter=lfs diff=lfs merge=lfs -text +*.pt filter=lfs diff=lfs merge=lfs -text +*.pth filter=lfs diff=lfs merge=lfs -text +*.rar filter=lfs diff=lfs merge=lfs -text +*.safetensors filter=lfs diff=lfs merge=lfs -text +saved_model/**/* filter=lfs diff=lfs merge=lfs -text +*.tar.* filter=lfs diff=lfs merge=lfs -text +*.tar filter=lfs diff=lfs merge=lfs -text +*.tflite filter=lfs diff=lfs merge=lfs -text +*.tgz filter=lfs diff=lfs merge=lfs -text +*.wasm filter=lfs diff=lfs merge=lfs -text +*.xz filter=lfs diff=lfs merge=lfs -text +*.zip filter=lfs diff=lfs merge=lfs -text +*.zst filter=lfs diff=lfs merge=lfs -text +*tfevents* filter=lfs diff=lfs merge=lfs -text +unsloth.Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text +unsloth.Q8_0.gguf filter=lfs diff=lfs merge=lfs -text 
+unsloth.Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text diff --git a/README.md b/README.md new file mode 100644 index 0000000..ed2f8e5 --- /dev/null +++ b/README.md @@ -0,0 +1,42 @@ +--- +base_model: +- N-Bot-Int/MistThena7B-V2 +tags: +- text-generation-inference +- transformers +- mistral +- rp +- gguf +language: +- en +license: apache-2.0 +datasets: +- N-Bot-Int/Iris-Uncensored-R2 +- N-Bot-Int/Millie-R1_DPO +- N-Bot-Int/Millia-R1_DPO +--- +# Support Us Through + - [![ko-fi](https://ko-fi.com/img/githubbutton_sm.svg)](https://ko-fi.com/J3J61D8NHV) + - [Official Ko-FI link!](https://ko-fi.com/nexusnetworkint) + +![image/png](https://cdn-uploads.huggingface.co/production/uploads/6633a73004501e16e7896b86/Ks9_kWvjksA3bG0sHe4Of.png) +# GGUF Version + **GGUF** with Quants! Allowing you to run models using KoboldCPP and other AI Environments! + + +# Quantizations: +| Quant Type | Benefits | Cons | +|---------------|---------------------------------------------------|---------------------------------------------------| +| **Q4_K_M** | ✅ Smallest size (fastest inference) | ❌ Lowest accuracy compared to other quants | +| | ✅ Requires the least VRAM/RAM | ❌ May struggle with complex reasoning | +| | ✅ Ideal for edge devices & low-resource setups | ❌ Can produce slightly degraded text quality | +| **Q5_K_M** | ✅ Better accuracy than Q4, while still compact | ❌ Slightly larger model size than Q4 | +| | ✅ Good balance between speed and precision | ❌ Needs a bit more VRAM than Q4 | +| | ✅ Works well on mid-range GPUs | ❌ Still not as accurate as higher-bit models | +| **Q8_0** | ✅ Highest accuracy (closest to full model) | ❌ Requires significantly more VRAM/RAM | +| | ✅ Best for complex reasoning & detailed outputs | ❌ Slower inference compared to Q4 & Q5 | +| | ✅ Suitable for high-end GPUs & serious workloads | ❌ Larger file size (takes more storage) | + +# Model Details: + Read the Model details on huggingface + [Model Detail 
Here!](https://huggingface.co/N-Bot-Int/MistThena7B-V2) \ No newline at end of file diff --git a/config.json b/config.json new file mode 100644 index 0000000..9f0f76f --- /dev/null +++ b/config.json @@ -0,0 +1,3 @@ +{ + "model_type": "mistral" +} \ No newline at end of file diff --git a/unsloth.Q4_K_M.gguf b/unsloth.Q4_K_M.gguf new file mode 100644 index 0000000..d8e702c --- /dev/null +++ b/unsloth.Q4_K_M.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cfcf411e2e63f886a13a67ea080db1f6ae015ac9741c4564ee798e954cf58b3b +size 4372815456 diff --git a/unsloth.Q5_K_M.gguf b/unsloth.Q5_K_M.gguf new file mode 100644 index 0000000..a96c47b --- /dev/null +++ b/unsloth.Q5_K_M.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e472608d935d3ef145ead41c4c7a23e255e5ab4cfd3a6f19253efb5de68909b7 +size 5136178784 diff --git a/unsloth.Q8_0.gguf b/unsloth.Q8_0.gguf new file mode 100644 index 0000000..ce5b58b --- /dev/null +++ b/unsloth.Q8_0.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9810a4ae429c3be7671fe696a9c72829e74385b05f329c7743ea4a5431215405 +size 7702568544