commit 52b846f84c31cf2e66265b078ad69936203c9184
Author: ModelHub XC
Date:   Wed Apr 22 05:09:55 2026 +0800

    Initialize project; model provided by the ModelHub XC community
    Model: mradermacher/Gema4b2-GGUF
    Source: Original Platform

diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..dec2f5a
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,47 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+Gema4b2.IQ4_XS.gguf filter=lfs diff=lfs merge=lfs -text
+Gema4b2.Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
+Gema4b2.Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+Gema4b2.Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+Gema4b2.Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+Gema4b2.Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+Gema4b2.Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+Gema4b2.Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+Gema4b2.Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+Gema4b2.Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
+Gema4b2.Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
+Gema4b2.f16.gguf filter=lfs diff=lfs merge=lfs -text
diff --git a/Gema4b2.IQ4_XS.gguf b/Gema4b2.IQ4_XS.gguf
new file mode 100644
index 0000000..c1119da
--- /dev/null
+++ b/Gema4b2.IQ4_XS.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bbf040ddc37d1b71c92e8883d2be716819f33fd826dcbf482ad68214026d7af7
+size 718168032
diff --git a/Gema4b2.Q2_K.gguf b/Gema4b2.Q2_K.gguf
new file mode 100644
index 0000000..ff228a9
--- /dev/null
+++ b/Gema4b2.Q2_K.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0bbafde820789f7b9c87ce77025d48c4beebc1ec1bfea171e654f740eee10895
+size 689815008
diff --git a/Gema4b2.Q3_K_L.gguf b/Gema4b2.Q3_K_L.gguf
new file mode 100644
index 0000000..623cc07
--- /dev/null
+++ b/Gema4b2.Q3_K_L.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dd704a951a48b872db8b26e5dcc8850a517f29236c9a2f0e721cc4f8b54fa039
+size 751576032
diff --git a/Gema4b2.Q3_K_M.gguf b/Gema4b2.Q3_K_M.gguf
new file mode 100644
index 0000000..8738d4d
--- /dev/null
+++ b/Gema4b2.Q3_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9c0302ddbb02a837e4d7642b73cac2d40749d034d27f3114ee0e5b82ebbdf45b
+size 722416608
diff --git a/Gema4b2.Q3_K_S.gguf b/Gema4b2.Q3_K_S.gguf
new file mode 100644
index 0000000..09570c6
--- /dev/null
+++ b/Gema4b2.Q3_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3bff3e4f8ef76cb40ac556c74e7c7bb8440da8695d5acd472af3c42fff8e3b59
+size 688856544
diff --git a/Gema4b2.Q4_K_M.gguf b/Gema4b2.Q4_K_M.gguf
new file mode 100644
index 0000000..8c4e912
--- /dev/null
+++ b/Gema4b2.Q4_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6b73bdf09eff8470d6a12e3d19cef285fcd1b5c7224f1e1ea2dbda40db6ded55
+size 806058720
diff --git a/Gema4b2.Q4_K_S.gguf b/Gema4b2.Q4_K_S.gguf
new file mode 100644
index 0000000..ce87c86
--- /dev/null
+++ b/Gema4b2.Q4_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9315f35863713067d780f8dbaafdc40e3158b9a69b5f73123a7175f77498b21f
+size 780993504
diff --git a/Gema4b2.Q5_K_M.gguf b/Gema4b2.Q5_K_M.gguf
new file mode 100644
index 0000000..3e6dcd3
--- /dev/null
+++ b/Gema4b2.Q5_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a139e40f537b375c943924495423e4de20d7a6edb361c5260cb2859bcc665390
+size 851346144
diff --git a/Gema4b2.Q5_K_S.gguf b/Gema4b2.Q5_K_S.gguf
new file mode 100644
index 0000000..0b280ca
--- /dev/null
+++ b/Gema4b2.Q5_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b1e46f246f825987673317a11d5ebd7292abc32acafd34a99f21289148c75292
+size 836400096
diff --git a/Gema4b2.Q6_K.gguf b/Gema4b2.Q6_K.gguf
new file mode 100644
index 0000000..34d1a91
--- /dev/null
+++ b/Gema4b2.Q6_K.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4ab60e3b2f8df4279ae099355cb628520d99e1872c6bf27e11a6b3a2ab8ef49d
+size 1011739104
diff --git a/Gema4b2.Q8_0.gguf b/Gema4b2.Q8_0.gguf
new file mode 100644
index 0000000..8766439
--- /dev/null
+++ b/Gema4b2.Q8_0.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:636d643e83dc77a732e373a849a1ab6bf3dfc389368dee74510978568e689a4a
+size 1069306848
diff --git a/Gema4b2.f16.gguf b/Gema4b2.f16.gguf
new file mode 100644
index 0000000..45d4a7b
--- /dev/null
+++ b/Gema4b2.f16.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:65d2945fef73dab492aa54dd68c16a607c767271082876124a4b979bd633bd03
+size 2006574048
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..b0422e8
--- /dev/null
+++ b/README.md
@@ -0,0 +1,74 @@
---
base_model: Alamaks/Gema4b2
language:
- en
library_name: transformers
license: apache-2.0
mradermacher:
  readme_rev: 1
quantized_by: mradermacher
tags:
- text-generation-inference
- transformers
- unsloth
- gemma3_text
---

## About

static quants of https://huggingface.co/Alamaks/Gema4b2

***For a convenient overview and download list, visit our [model page for this model](https://hf.tst.eu/model#Gema4b2-GGUF).***

Weighted/imatrix quants do not appear to be available (from me) at this time. If they do not show up within a week or so of the static ones, I have probably not planned them; feel free to request them by opening a Community Discussion.

## Usage

If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including how to concatenate multi-part files.
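As a quick illustration, here is a minimal sketch of loading one of these quants with the llama-cpp-python bindings; the package, the chosen quant file, and all parameter values are assumptions for the example, not part of this repository, so adjust them to your setup.

```python
# Minimal sketch, assuming `pip install llama-cpp-python` and that
# Gema4b2.Q4_K_M.gguf (see the table below) sits in the working directory.
from llama_cpp import Llama

llm = Llama(
    model_path="Gema4b2.Q4_K_M.gguf",  # any quant from the table below
    n_ctx=4096,                        # illustrative context size
)

out = llm.create_chat_completion(
    messages=[{"role": "user", "content": "Explain what a GGUF file is in one sentence."}],
    max_tokens=128,
)
print(out["choices"][0]["message"]["content"])
```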
## Provided Quants

(sorted by size, not necessarily quality. IQ-quants are often preferable over similar-sized non-IQ quants)

| Link | Type | Size/GB | Notes |
|:-----|:-----|--------:|:------|
| [GGUF](https://huggingface.co/mradermacher/Gema4b2-GGUF/resolve/main/Gema4b2.Q3_K_S.gguf) | Q3_K_S | 0.8 | |
| [GGUF](https://huggingface.co/mradermacher/Gema4b2-GGUF/resolve/main/Gema4b2.Q2_K.gguf) | Q2_K | 0.8 | |
| [GGUF](https://huggingface.co/mradermacher/Gema4b2-GGUF/resolve/main/Gema4b2.IQ4_XS.gguf) | IQ4_XS | 0.8 | |
| [GGUF](https://huggingface.co/mradermacher/Gema4b2-GGUF/resolve/main/Gema4b2.Q3_K_M.gguf) | Q3_K_M | 0.8 | lower quality |
| [GGUF](https://huggingface.co/mradermacher/Gema4b2-GGUF/resolve/main/Gema4b2.Q3_K_L.gguf) | Q3_K_L | 0.9 | |
| [GGUF](https://huggingface.co/mradermacher/Gema4b2-GGUF/resolve/main/Gema4b2.Q4_K_S.gguf) | Q4_K_S | 0.9 | fast, recommended |
| [GGUF](https://huggingface.co/mradermacher/Gema4b2-GGUF/resolve/main/Gema4b2.Q4_K_M.gguf) | Q4_K_M | 0.9 | fast, recommended |
| [GGUF](https://huggingface.co/mradermacher/Gema4b2-GGUF/resolve/main/Gema4b2.Q5_K_S.gguf) | Q5_K_S | 0.9 | |
| [GGUF](https://huggingface.co/mradermacher/Gema4b2-GGUF/resolve/main/Gema4b2.Q5_K_M.gguf) | Q5_K_M | 1.0 | |
| [GGUF](https://huggingface.co/mradermacher/Gema4b2-GGUF/resolve/main/Gema4b2.Q6_K.gguf) | Q6_K | 1.1 | very good quality |
| [GGUF](https://huggingface.co/mradermacher/Gema4b2-GGUF/resolve/main/Gema4b2.Q8_0.gguf) | Q8_0 | 1.2 | fast, best quality |
| [GGUF](https://huggingface.co/mradermacher/Gema4b2-GGUF/resolve/main/Gema4b2.f16.gguf) | f16 | 2.1 | 16 bpw, overkill |

Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better):

![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png)

And here are Artefact2's thoughts on the matter:
https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9

## FAQ / Model Request

See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized.

## Thanks

I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time.
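Each quant in this commit is stored via Git LFS, so the repository itself only records a pointer (a `version` line, an `oid sha256:` digest, and a `size`). Below is a minimal sketch for checking that a downloaded file matches those recorded values; the expected digest and size are copied from the Gema4b2.Q4_K_M.gguf pointer above, while the local file path is an assumption for the example.

```python
# Minimal sketch: verify a downloaded quant against its Git LFS pointer.
# Expected values are taken from the Gema4b2.Q4_K_M.gguf pointer in this commit.
import hashlib
import os

FILE = "Gema4b2.Q4_K_M.gguf"  # assumed local path to the downloaded quant
EXPECTED_SHA256 = "6b73bdf09eff8470d6a12e3d19cef285fcd1b5c7224f1e1ea2dbda40db6ded55"
EXPECTED_SIZE = 806058720

assert os.path.getsize(FILE) == EXPECTED_SIZE, "size mismatch"

digest = hashlib.sha256()
with open(FILE, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

assert digest.hexdigest() == EXPECTED_SHA256, "sha256 mismatch"
print("OK:", FILE)
```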