commit 0905e41084d2e5a808505810468f0c3ff185e979
Author: ModelHub XC
Date:   Sat Apr 11 03:22:58 2026 +0800

    Initialize the project; model provided by the ModelHub XC community

    Model: mradermacher/Q2.5-Veltha-14B-0.5-GGUF
    Source: Original Platform

diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..035311b
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,46 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+Q2.5-Veltha-14B-0.5.Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+Q2.5-Veltha-14B-0.5.Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
+Q2.5-Veltha-14B-0.5.Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
+Q2.5-Veltha-14B-0.5.Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
+Q2.5-Veltha-14B-0.5.Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+Q2.5-Veltha-14B-0.5.Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+Q2.5-Veltha-14B-0.5.Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+Q2.5-Veltha-14B-0.5.Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+Q2.5-Veltha-14B-0.5.Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+Q2.5-Veltha-14B-0.5.Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+Q2.5-Veltha-14B-0.5.IQ4_XS.gguf filter=lfs diff=lfs merge=lfs -text
diff --git a/Q2.5-Veltha-14B-0.5.IQ4_XS.gguf b/Q2.5-Veltha-14B-0.5.IQ4_XS.gguf
new file mode 100644
index 0000000..efbb446
--- /dev/null
+++ b/Q2.5-Veltha-14B-0.5.IQ4_XS.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0a75b43ec1ff3664852a40dbe2f80e17b1f3dd2d708c8415940f6380122769cd
+size 8183427040
diff --git a/Q2.5-Veltha-14B-0.5.Q2_K.gguf b/Q2.5-Veltha-14B-0.5.Q2_K.gguf
new file mode 100644
index 0000000..52f1428
--- /dev/null
+++ b/Q2.5-Veltha-14B-0.5.Q2_K.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f0da95a88036a3987c1d8a5bd104149e5996d25985e5f8d318adb0b79d70ec57
+size 5768144096
diff --git a/Q2.5-Veltha-14B-0.5.Q3_K_L.gguf b/Q2.5-Veltha-14B-0.5.Q3_K_L.gguf
new file mode 100644
index 0000000..f2deeaf
--- /dev/null
+++ b/Q2.5-Veltha-14B-0.5.Q3_K_L.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:742a1b6a1b4058f1a0d5aa20f4984b33a8e256ed1f39221e450cf61d7c639173
+size 7922207328
diff --git a/Q2.5-Veltha-14B-0.5.Q3_K_M.gguf b/Q2.5-Veltha-14B-0.5.Q3_K_M.gguf
new file mode 100644
index 0000000..d06a08c
--- /dev/null
+++ b/Q2.5-Veltha-14B-0.5.Q3_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:977fe000f75d0f865915c42be5423712d90c5cf530b194b03167e427dde5d393
+size 7336643168
diff --git a/Q2.5-Veltha-14B-0.5.Q3_K_S.gguf b/Q2.5-Veltha-14B-0.5.Q3_K_S.gguf
new file mode 100644
index 0000000..47c1aa9
--- /dev/null
+++ b/Q2.5-Veltha-14B-0.5.Q3_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:187ec79cf8f876b9fbe3313c7a20b4213a2e9307d9942d2ba8fb1fed6437e2e7
+size 6657034848
diff --git a/Q2.5-Veltha-14B-0.5.Q4_K_M.gguf b/Q2.5-Veltha-14B-0.5.Q4_K_M.gguf
new file mode 100644
index 0000000..6bb6fda
--- /dev/null
+++ b/Q2.5-Veltha-14B-0.5.Q4_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:49967b5d1ba92ecc092c0dd31e00ae12b5a9f788d68fce173da026e618b8bb44
+size 8985278080
diff --git a/Q2.5-Veltha-14B-0.5.Q4_K_S.gguf b/Q2.5-Veltha-14B-0.5.Q4_K_S.gguf
new file mode 100644
index 0000000..1e38fbb
--- /dev/null
+++ b/Q2.5-Veltha-14B-0.5.Q4_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7814d222bc3a693c095da728f9fbdcb64d84d335c43f541732ed7c26872b0135
+size 8570599040
diff --git a/Q2.5-Veltha-14B-0.5.Q5_K_M.gguf b/Q2.5-Veltha-14B-0.5.Q5_K_M.gguf
new file mode 100644
index 0000000..7c3f6ed
--- /dev/null
+++ b/Q2.5-Veltha-14B-0.5.Q5_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ce332e6727589c9fd85aaa7d420b1a9ab6b16f31edd42390e4b999095e8176b1
+size 10505785600
diff --git a/Q2.5-Veltha-14B-0.5.Q5_K_S.gguf b/Q2.5-Veltha-14B-0.5.Q5_K_S.gguf
new file mode 100644
index 0000000..12fd1da
--- /dev/null
+++ b/Q2.5-Veltha-14B-0.5.Q5_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d2bcec6f820d8356eeacfefa83d067e61f6d601ed37ebc344ad57b11a3c93434
+size 10263466240
diff --git a/Q2.5-Veltha-14B-0.5.Q6_K.gguf b/Q2.5-Veltha-14B-0.5.Q6_K.gguf
new file mode 100644
index 0000000..261847e
--- /dev/null
+++ b/Q2.5-Veltha-14B-0.5.Q6_K.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1ecfb4f87bbac22dc18f6a64d3e7b151b7f25ed0472ea752bd800fd7c20fb067
+size 12121324864
diff --git a/Q2.5-Veltha-14B-0.5.Q8_0.gguf b/Q2.5-Veltha-14B-0.5.Q8_0.gguf
new file mode 100644
index 0000000..1807b18
--- /dev/null
+++ b/Q2.5-Veltha-14B-0.5.Q8_0.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c07cbaea548a56df04eaf6f4316697d9855ae2b925943aed6754bed3a46d4800
+size 15697249216
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..24ee009
--- /dev/null
+++ b/README.md
@@ -0,0 +1,65 @@
+---
+base_model: djuna/Q2.5-Veltha-14B-0.5
+language:
+- en
+library_name: transformers
+quantized_by: mradermacher
+tags:
+- mergekit
+- merge
+---
+## About
+
+static quants of https://huggingface.co/djuna/Q2.5-Veltha-14B-0.5
+
+weighted/imatrix quants are available at https://huggingface.co/mradermacher/Q2.5-Veltha-14B-0.5-i1-GGUF
+
+## Usage
+
+If you are unsure how to use GGUF files, refer to one of [TheBloke's
+READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for
+more details, including how to concatenate multi-part files.
+
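+One common way to run these quants locally is through the `llama-cpp-python`
+bindings; the snippet below is only a minimal, untested sketch (the package
+choice, filename, and settings are assumptions, not something this repo ships
+or tests), so adjust it to the quant you actually downloaded and to your hardware:
+
+```python
+# Minimal sketch: load a quant from this repo with llama-cpp-python
+# (pip install llama-cpp-python). Filename and settings are assumptions.
+from llama_cpp import Llama
+
+llm = Llama(
+    model_path="Q2.5-Veltha-14B-0.5.Q4_K_M.gguf",  # any file from the table below
+    n_ctx=4096,       # context window; raise it if you need longer prompts
+    n_gpu_layers=-1,  # offload all layers on GPU builds; use 0 for CPU-only
+)
+
+out = llm("Explain in one paragraph what a GGUF file is.", max_tokens=200)
+print(out["choices"][0]["text"])
+```
+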
+## Provided Quants
+
+(sorted by size, not necessarily quality. IQ-quants are often preferable over similarly sized non-IQ quants)
+
+| Link | Type | Size/GB | Notes |
+|:-----|:-----|--------:|:------|
+| [GGUF](https://huggingface.co/mradermacher/Q2.5-Veltha-14B-0.5-GGUF/resolve/main/Q2.5-Veltha-14B-0.5.Q2_K.gguf) | Q2_K | 5.9 | |
+| [GGUF](https://huggingface.co/mradermacher/Q2.5-Veltha-14B-0.5-GGUF/resolve/main/Q2.5-Veltha-14B-0.5.Q3_K_S.gguf) | Q3_K_S | 6.8 | |
+| [GGUF](https://huggingface.co/mradermacher/Q2.5-Veltha-14B-0.5-GGUF/resolve/main/Q2.5-Veltha-14B-0.5.Q3_K_M.gguf) | Q3_K_M | 7.4 | lower quality |
+| [GGUF](https://huggingface.co/mradermacher/Q2.5-Veltha-14B-0.5-GGUF/resolve/main/Q2.5-Veltha-14B-0.5.Q3_K_L.gguf) | Q3_K_L | 8.0 | |
+| [GGUF](https://huggingface.co/mradermacher/Q2.5-Veltha-14B-0.5-GGUF/resolve/main/Q2.5-Veltha-14B-0.5.IQ4_XS.gguf) | IQ4_XS | 8.3 | |
+| [GGUF](https://huggingface.co/mradermacher/Q2.5-Veltha-14B-0.5-GGUF/resolve/main/Q2.5-Veltha-14B-0.5.Q4_K_S.gguf) | Q4_K_S | 8.7 | fast, recommended |
+| [GGUF](https://huggingface.co/mradermacher/Q2.5-Veltha-14B-0.5-GGUF/resolve/main/Q2.5-Veltha-14B-0.5.Q4_K_M.gguf) | Q4_K_M | 9.1 | fast, recommended |
+| [GGUF](https://huggingface.co/mradermacher/Q2.5-Veltha-14B-0.5-GGUF/resolve/main/Q2.5-Veltha-14B-0.5.Q5_K_S.gguf) | Q5_K_S | 10.4 | |
+| [GGUF](https://huggingface.co/mradermacher/Q2.5-Veltha-14B-0.5-GGUF/resolve/main/Q2.5-Veltha-14B-0.5.Q5_K_M.gguf) | Q5_K_M | 10.6 | |
+| [GGUF](https://huggingface.co/mradermacher/Q2.5-Veltha-14B-0.5-GGUF/resolve/main/Q2.5-Veltha-14B-0.5.Q6_K.gguf) | Q6_K | 12.2 | very good quality |
+| [GGUF](https://huggingface.co/mradermacher/Q2.5-Veltha-14B-0.5-GGUF/resolve/main/Q2.5-Veltha-14B-0.5.Q8_0.gguf) | Q8_0 | 15.8 | fast, best quality |
+
+(a short sketch for scripting the download of a single file from this table is included at the end of this card)
+
+Here is a handy graph by ikawrakow comparing some lower-quality quant
+types (lower is better):
+
+![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png)
+
+And here are Artefact2's thoughts on the matter:
+https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9
+
+## FAQ / Model Request
+
+See https://huggingface.co/mradermacher/model_requests for some answers to
+questions you might have and/or if you want some other model quantized.
+
+## Thanks
+
+I thank my company, [nethype GmbH](https://www.nethype.de/), for letting
+me use its servers and providing upgrades to my workstation to enable
+this work in my free time.
+
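+## Downloading a single quant
+
+The per-file links in the Provided Quants table resolve to plain HTTP
+downloads, so any downloader works; if you would rather script it, here is a
+minimal sketch using the `huggingface_hub` client (the package is an
+assumption on my part, not a requirement of this repo):
+
+```python
+# Minimal sketch: fetch one quant from this repo with huggingface_hub
+# (pip install huggingface_hub). Repo id and filename mirror the table links.
+from huggingface_hub import hf_hub_download
+
+path = hf_hub_download(
+    repo_id="mradermacher/Q2.5-Veltha-14B-0.5-GGUF",
+    filename="Q2.5-Veltha-14B-0.5.Q4_K_M.gguf",  # swap in any quant from the table
+)
+print(path)  # local path of the cached GGUF file
+```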