From f67915190cc994ca81dde88fc37c78c39a610846 Mon Sep 17 00:00:00 2001
From: Michael Radermacher
Date: Wed, 3 Jul 2024 07:17:06 +0000
Subject: [PATCH] auto-patch README.md

---
 README.md | 65 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 65 insertions(+)

diff --git a/README.md b/README.md
index 3ea5ac9..562d84a 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,71 @@
+---
+base_model: flammenai/Mahou-1.3-gemma2-9B
+datasets:
+- flammenai/FlameMix-DPO-v1
+- flammenai/MahouMix-v1
+- flammenai/Grill-Flammen-v1_chatML
+language:
+- en
+library_name: transformers
+license: gemma
+quantized_by: mradermacher
+---
+## About
+
+static quants of https://huggingface.co/flammenai/Mahou-1.3-gemma2-9B
+
+weighted/imatrix quants are available at https://huggingface.co/mradermacher/Mahou-1.3-gemma2-9B-i1-GGUF
+
+## Usage
+
+If you are unsure how to use GGUF files, refer to one of [TheBloke's
+READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for
+more details, including how to concatenate multi-part files. A minimal
+download-and-run sketch follows the quant table below.
+
+## Provided Quants
+
+(sorted by size, not necessarily quality. IQ-quants are often preferable over similar-sized non-IQ quants)
+
+| Link | Type | Size/GB | Notes |
+|:-----|:-----|--------:|:------|
+| [GGUF](https://huggingface.co/mradermacher/Mahou-1.3-gemma2-9B-GGUF/resolve/main/Mahou-1.3-gemma2-9B.Q2_K.gguf) | Q2_K | 3.9 | |
+| [GGUF](https://huggingface.co/mradermacher/Mahou-1.3-gemma2-9B-GGUF/resolve/main/Mahou-1.3-gemma2-9B.IQ3_XS.gguf) | IQ3_XS | 4.2 | |
+| [GGUF](https://huggingface.co/mradermacher/Mahou-1.3-gemma2-9B-GGUF/resolve/main/Mahou-1.3-gemma2-9B.Q3_K_S.gguf) | Q3_K_S | 4.4 | |
+| [GGUF](https://huggingface.co/mradermacher/Mahou-1.3-gemma2-9B-GGUF/resolve/main/Mahou-1.3-gemma2-9B.IQ3_S.gguf) | IQ3_S | 4.4 | beats Q3_K* |
+| [GGUF](https://huggingface.co/mradermacher/Mahou-1.3-gemma2-9B-GGUF/resolve/main/Mahou-1.3-gemma2-9B.IQ3_M.gguf) | IQ3_M | 4.6 | |
+| [GGUF](https://huggingface.co/mradermacher/Mahou-1.3-gemma2-9B-GGUF/resolve/main/Mahou-1.3-gemma2-9B.Q3_K_M.gguf) | Q3_K_M | 4.9 | lower quality |
+| [GGUF](https://huggingface.co/mradermacher/Mahou-1.3-gemma2-9B-GGUF/resolve/main/Mahou-1.3-gemma2-9B.Q3_K_L.gguf) | Q3_K_L | 5.2 | |
+| [GGUF](https://huggingface.co/mradermacher/Mahou-1.3-gemma2-9B-GGUF/resolve/main/Mahou-1.3-gemma2-9B.IQ4_XS.gguf) | IQ4_XS | 5.3 | |
+| [GGUF](https://huggingface.co/mradermacher/Mahou-1.3-gemma2-9B-GGUF/resolve/main/Mahou-1.3-gemma2-9B.Q4_K_S.gguf) | Q4_K_S | 5.6 | fast, recommended |
+| [GGUF](https://huggingface.co/mradermacher/Mahou-1.3-gemma2-9B-GGUF/resolve/main/Mahou-1.3-gemma2-9B.Q4_K_M.gguf) | Q4_K_M | 5.9 | fast, recommended |
+| [GGUF](https://huggingface.co/mradermacher/Mahou-1.3-gemma2-9B-GGUF/resolve/main/Mahou-1.3-gemma2-9B.Q5_K_S.gguf) | Q5_K_S | 6.6 | |
+| [GGUF](https://huggingface.co/mradermacher/Mahou-1.3-gemma2-9B-GGUF/resolve/main/Mahou-1.3-gemma2-9B.Q5_K_M.gguf) | Q5_K_M | 6.7 | |
+| [GGUF](https://huggingface.co/mradermacher/Mahou-1.3-gemma2-9B-GGUF/resolve/main/Mahou-1.3-gemma2-9B.Q6_K.gguf) | Q6_K | 7.7 | very good quality |
+| [GGUF](https://huggingface.co/mradermacher/Mahou-1.3-gemma2-9B-GGUF/resolve/main/Mahou-1.3-gemma2-9B.Q8_0.gguf) | Q8_0 | 9.9 | fast, best quality |
+| [GGUF](https://huggingface.co/mradermacher/Mahou-1.3-gemma2-9B-GGUF/resolve/main/Mahou-1.3-gemma2-9B.f16.gguf) | f16 | 18.6 | 16 bpw, overkill |
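+
+If you want a concrete starting point, here is a minimal Python sketch,
+assuming the `huggingface_hub` and `llama-cpp-python` packages; neither is
+required by this repo, and any GGUF-capable runtime works. The chosen quant,
+context size, and prompt are illustrative only:
+
+```python
+# Minimal sketch: fetch one quant from this repo and run a short completion.
+# Assumes: pip install huggingface_hub llama-cpp-python
+from huggingface_hub import hf_hub_download
+from llama_cpp import Llama
+
+# Download the "fast, recommended" Q4_K_M quant from the table above.
+model_path = hf_hub_download(
+    repo_id="mradermacher/Mahou-1.3-gemma2-9B-GGUF",
+    filename="Mahou-1.3-gemma2-9B.Q4_K_M.gguf",
+)
+
+# Load the model; n_ctx here is an arbitrary illustrative choice.
+llm = Llama(model_path=model_path, n_ctx=4096)
+
+# Run a plain text completion and print the generated text.
+out = llm("Write a haiku about quantization.", max_tokens=64)
+print(out["choices"][0]["text"])
+```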
+
+Here is a handy graph by ikawrakow comparing some lower-quality quant
+types (lower is better):
+
+![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png)
+
+And here are Artefact2's thoughts on the matter:
+https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9
+
+A small arithmetic sketch at the end of this README shows how the Size/GB
+column above maps to approximate bits per weight.
+
+## FAQ / Model Request
+
+See https://huggingface.co/mradermacher/model_requests for some answers to
+questions you might have and/or if you want some other model quantized.
+
+## Thanks
+
+I thank my company, [nethype GmbH](https://www.nethype.de/), for letting
+me use its servers and providing upgrades to my workstation to enable
+this work in my free time.
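+
+As promised above, a small arithmetic sketch for reading the Size/GB column
+as bits per weight (bpw = file size in bits / parameter count). The ~9.2B
+parameter count used for gemma2-9B is an assumed, illustrative figure:
+
+```python
+# Sketch: estimate bits per weight (bpw) from the Size/GB column above.
+# ASSUMPTION: gemma2-9B has roughly 9.2e9 parameters (illustrative figure).
+N_PARAMS = 9.2e9
+
+def bpw(size_gb: float) -> float:
+    """Approximate bits per weight for a quant file of the given size in GB."""
+    return size_gb * 8e9 / N_PARAMS
+
+for name, size_gb in [("Q2_K", 3.9), ("Q4_K_M", 5.9), ("Q8_0", 9.9), ("f16", 18.6)]:
+    print(f"{name}: ~{bpw(size_gb):.1f} bpw")
+
+# f16 comes out near 16 bpw, matching the table's "16 bpw, overkill" note.
+```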