From 0e047c74db75bbe3131bd0f579f994511c6822e5 Mon Sep 17 00:00:00 2001
From: ModelHub XC
Date: Sat, 9 May 2026 09:56:29 +0800
Subject: [PATCH] Initialize the project; model provided by the ModelHub XC community
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Model: mradermacher/Neuralpy-v1-GGUF
Source: Original Platform
---
 .gitattributes          | 47 +++++++++++++++++++++++++++
 Neuralpy-v1.IQ4_XS.gguf |  3 ++
 Neuralpy-v1.Q2_K.gguf   |  3 ++
 Neuralpy-v1.Q3_K_L.gguf |  3 ++
 Neuralpy-v1.Q3_K_M.gguf |  3 ++
 Neuralpy-v1.Q3_K_S.gguf |  3 ++
 Neuralpy-v1.Q4_K_M.gguf |  3 ++
 Neuralpy-v1.Q4_K_S.gguf |  3 ++
 Neuralpy-v1.Q5_K_M.gguf |  3 ++
 Neuralpy-v1.Q5_K_S.gguf |  3 ++
 Neuralpy-v1.Q6_K.gguf   |  3 ++
 Neuralpy-v1.Q8_0.gguf   |  3 ++
 Neuralpy-v1.f16.gguf    |  3 ++
 README.md               | 71 +++++++++++++++++++++++++++++++++++++++++
 14 files changed, 154 insertions(+)
 create mode 100644 .gitattributes
 create mode 100644 Neuralpy-v1.IQ4_XS.gguf
 create mode 100644 Neuralpy-v1.Q2_K.gguf
 create mode 100644 Neuralpy-v1.Q3_K_L.gguf
 create mode 100644 Neuralpy-v1.Q3_K_M.gguf
 create mode 100644 Neuralpy-v1.Q3_K_S.gguf
 create mode 100644 Neuralpy-v1.Q4_K_M.gguf
 create mode 100644 Neuralpy-v1.Q4_K_S.gguf
 create mode 100644 Neuralpy-v1.Q5_K_M.gguf
 create mode 100644 Neuralpy-v1.Q5_K_S.gguf
 create mode 100644 Neuralpy-v1.Q6_K.gguf
 create mode 100644 Neuralpy-v1.Q8_0.gguf
 create mode 100644 Neuralpy-v1.f16.gguf
 create mode 100644 README.md

diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..fda0949
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,47 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+Neuralpy-v1.IQ4_XS.gguf filter=lfs diff=lfs merge=lfs -text
+Neuralpy-v1.Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
+Neuralpy-v1.Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+Neuralpy-v1.Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+Neuralpy-v1.Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+Neuralpy-v1.Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+Neuralpy-v1.Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+Neuralpy-v1.Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+Neuralpy-v1.Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+Neuralpy-v1.Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
+Neuralpy-v1.Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
+Neuralpy-v1.f16.gguf filter=lfs diff=lfs merge=lfs -text
diff --git a/Neuralpy-v1.IQ4_XS.gguf b/Neuralpy-v1.IQ4_XS.gguf
new file mode 100644
index 0000000..d8ebc92
--- /dev/null
+++ b/Neuralpy-v1.IQ4_XS.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2ee64b5a481f8912151b89db95682826058f013f6028be70130d8a4ed91d6fc6
+size 609808256
diff --git a/Neuralpy-v1.Q2_K.gguf b/Neuralpy-v1.Q2_K.gguf
new file mode 100644
index 0000000..92406b0
--- /dev/null
+++ b/Neuralpy-v1.Q2_K.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:17812758c363f0d9ab6706722728f3d7d62c81dcf910a4afa87f518a99b8aecf
+size 432131968
diff --git a/Neuralpy-v1.Q3_K_L.gguf b/Neuralpy-v1.Q3_K_L.gguf
new file mode 100644
index 0000000..34b967a
--- /dev/null
+++ b/Neuralpy-v1.Q3_K_L.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:77dbddf0e8d6dcbf8135a42f7ac1d9f8b2bb11cd89d622c9d188e9f00f04bdc5
+size 591527808
diff --git a/Neuralpy-v1.Q3_K_M.gguf b/Neuralpy-v1.Q3_K_M.gguf
new file mode 100644
index 0000000..ffd8ce4
--- /dev/null
+++ b/Neuralpy-v1.Q3_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e3ea810a2799b2551feb36ab846adb4891ab865e409f3e0cb4d242f3c906e149
+size 548405120
diff --git a/Neuralpy-v1.Q3_K_S.gguf b/Neuralpy-v1.Q3_K_S.gguf
new file mode 100644
index 0000000..992e835
--- /dev/null
+++ b/Neuralpy-v1.Q3_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:164d3a523269282684b844124dfcf5e0b5a30daf062b238b1edc69f2133e2b85
+size 499343232
diff --git a/Neuralpy-v1.Q4_K_M.gguf b/Neuralpy-v1.Q4_K_M.gguf
new file mode 100644
index 0000000..424e015
--- /dev/null
+++ b/Neuralpy-v1.Q4_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5a27850935818dc6b692434503971992ef50b20cdfbfbc69bb55374f9709107e
+size 667815808
diff --git a/Neuralpy-v1.Q4_K_S.gguf b/Neuralpy-v1.Q4_K_S.gguf
new file mode 100644
index 0000000..bfc7b35
--- /dev/null
+++ b/Neuralpy-v1.Q4_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:61d5ff8130054dcab0dc101dfc230bd661c31a0698d51fdddb57c3f3ba62559c
+size 639872896
diff --git a/Neuralpy-v1.Q5_K_M.gguf b/Neuralpy-v1.Q5_K_M.gguf
new file mode 100644
index 0000000..d33353c
--- /dev/null
+++ b/Neuralpy-v1.Q5_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:86cfd4fffeb376fa5963e20fff33cb7bb55f71c334883c35e50e8b28920f1065
+size 782045056
diff --git a/Neuralpy-v1.Q5_K_S.gguf b/Neuralpy-v1.Q5_K_S.gguf
new file mode 100644
index 0000000..814d3e7
--- /dev/null
+++ b/Neuralpy-v1.Q5_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f4245b2ff0d10d3c7aa468fc970b680e85070032ea2827f805b4da929a36b34
+size 766029696
diff --git a/Neuralpy-v1.Q6_K.gguf b/Neuralpy-v1.Q6_K.gguf
new file mode 100644
index 0000000..d5c6903
--- /dev/null
+++ b/Neuralpy-v1.Q6_K.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a688528fb67ecd0e3a6d530d1bc3daf3b52d6406aca50c3dd713f8290bcdf7c1
+size 903413632
diff --git a/Neuralpy-v1.Q8_0.gguf b/Neuralpy-v1.Q8_0.gguf
new file mode 100644
index 0000000..b6c0f63
--- /dev/null
+++ b/Neuralpy-v1.Q8_0.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7e0a85c77d916619acc6c3eb8192fc44afac1d5ed7fcd4aaf92658c3ec4c6b79
+size 1169809280
diff --git a/Neuralpy-v1.f16.gguf b/Neuralpy-v1.f16.gguf
new file mode 100644
index 0000000..e601a75
--- /dev/null
+++ b/Neuralpy-v1.f16.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:58e2b309ee87bf1f070eb4f956f79ba18aefc76b42792e9ccf6f0db1835936af
+size 2201018240
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..aea0e3e
--- /dev/null
+++ b/README.md
@@ -0,0 +1,71 @@
+---
+base_model: rahul-shrivastav/Neuralpy-v1
+language:
+- en
+library_name: transformers
+mradermacher:
+  readme_rev: 1
+quantized_by: mradermacher
+---
+## About
+
+static quants of https://huggingface.co/rahul-shrivastav/Neuralpy-v1
+
+***For a convenient overview and download list, visit our [model page for this model](https://hf.tst.eu/model#Neuralpy-v1-GGUF).***
+
+weighted/imatrix quants seem not to be available (by me) at this time. If they do not show up a week or so after the static ones, I have probably not planned for them. Feel free to request them by opening a Community Discussion.
+
+## Usage
+
+If you are unsure how to use GGUF files, refer to one of [TheBloke's
+READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for
+more details, including on how to concatenate multi-part files.
+
+## Provided Quants
+
+(sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants)
+
+| Link | Type | Size/GB | Notes |
+|:-----|:-----|--------:|:------|
+| [GGUF](https://huggingface.co/mradermacher/Neuralpy-v1-GGUF/resolve/main/Neuralpy-v1.Q2_K.gguf) | Q2_K | 0.5 | |
+| [GGUF](https://huggingface.co/mradermacher/Neuralpy-v1-GGUF/resolve/main/Neuralpy-v1.Q3_K_S.gguf) | Q3_K_S | 0.6 | |
+| [GGUF](https://huggingface.co/mradermacher/Neuralpy-v1-GGUF/resolve/main/Neuralpy-v1.Q3_K_M.gguf) | Q3_K_M | 0.6 | lower quality |
+| [GGUF](https://huggingface.co/mradermacher/Neuralpy-v1-GGUF/resolve/main/Neuralpy-v1.Q3_K_L.gguf) | Q3_K_L | 0.7 | |
+| [GGUF](https://huggingface.co/mradermacher/Neuralpy-v1-GGUF/resolve/main/Neuralpy-v1.IQ4_XS.gguf) | IQ4_XS | 0.7 | |
+| [GGUF](https://huggingface.co/mradermacher/Neuralpy-v1-GGUF/resolve/main/Neuralpy-v1.Q4_K_S.gguf) | Q4_K_S | 0.7 | fast, recommended |
+| [GGUF](https://huggingface.co/mradermacher/Neuralpy-v1-GGUF/resolve/main/Neuralpy-v1.Q4_K_M.gguf) | Q4_K_M | 0.8 | fast, recommended |
+| [GGUF](https://huggingface.co/mradermacher/Neuralpy-v1-GGUF/resolve/main/Neuralpy-v1.Q5_K_S.gguf) | Q5_K_S | 0.9 | |
+| [GGUF](https://huggingface.co/mradermacher/Neuralpy-v1-GGUF/resolve/main/Neuralpy-v1.Q5_K_M.gguf) | Q5_K_M | 0.9 | |
+| [GGUF](https://huggingface.co/mradermacher/Neuralpy-v1-GGUF/resolve/main/Neuralpy-v1.Q6_K.gguf) | Q6_K | 1.0 | very good quality |
+| [GGUF](https://huggingface.co/mradermacher/Neuralpy-v1-GGUF/resolve/main/Neuralpy-v1.Q8_0.gguf) | Q8_0 | 1.3 | fast, best quality |
+| [GGUF](https://huggingface.co/mradermacher/Neuralpy-v1-GGUF/resolve/main/Neuralpy-v1.f16.gguf) | f16 | 2.3 | 16 bpw, overkill |
+
+Here is a handy graph by ikawrakow comparing some lower-quality quant
+types (lower is better):
+
+![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png)
+
+And here are Artefact2's thoughts on the matter:
+https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9
+
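+## Example: Downloading and Running a Quant
+
+The snippet below is a minimal sketch of one way to fetch and run one of the
+quants listed above. It assumes the `huggingface_hub` and `llama-cpp-python`
+packages are installed (`pip install huggingface_hub llama-cpp-python`); the
+repository id and the Q4_K_M file name are taken from the table above, while
+settings such as `n_ctx` and the prompt are placeholders to adapt to your
+hardware and use case.
+
+```python
+from huggingface_hub import hf_hub_download
+from llama_cpp import Llama
+
+# Download one quant from this repository; the file is cached locally.
+model_path = hf_hub_download(
+    repo_id="mradermacher/Neuralpy-v1-GGUF",
+    filename="Neuralpy-v1.Q4_K_M.gguf",
+)
+
+# Load the GGUF file with llama.cpp through its Python bindings.
+# n_ctx is an assumed context size; adjust it to the base model's limits.
+llm = Llama(model_path=model_path, n_ctx=2048)
+
+# Run a short completion to check that the model loads and generates text.
+out = llm("Explain in one sentence what a GGUF file is.", max_tokens=64)
+print(out["choices"][0]["text"])
+```
+
+The same file can also be run directly with the llama.cpp command-line tools;
+the Python route is used here only to keep the download and the inference
+call in one place.
+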
+## FAQ / Model Request
+
+See https://huggingface.co/mradermacher/model_requests for some answers to
+questions you might have and/or if you want some other model quantized.
+
+## Thanks
+
+I thank my company, [nethype GmbH](https://www.nethype.de/), for letting
+me use its servers and providing upgrades to my workstation to enable
+this work in my free time.
+