From 42a2b0eb04e2732c96101f1a0b4d2277c87b6eef Mon Sep 17 00:00:00 2001
From: team mradermacher
Date: Sun, 14 Sep 2025 10:13:01 +0000
Subject: [PATCH] auto-patch README.md

---
 README.md | 77 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 77 insertions(+)

diff --git a/README.md b/README.md
index 63d37c8..8a50d9d 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,29 @@
+---
+base_model: unmodeled-tyler/vanta-research-apollo-v1-4b
+datasets:
+- nvidia/HelpSteer2
+- openai/collective-alignment-1
+- nvidia/OpenScienceReasoning-2
+- Anthropic/hh-rlhf
+- EleutherAI/hendrycks_math
+language: en
+library_name: transformers
+license: apache-2.0
+mradermacher:
+  readme_rev: 1
+quantized_by: mradermacher
+tags:
+- text-generation
+- conversational
+- reasoning
+- parameter-efficient
+- lora
+- phi4
+- apollo
+- vanta-research
+---
+## About
+
@@ -7,3 +33,54 @@
 static quants of https://huggingface.co/unmodeled-tyler/vanta-research-apollo-v1-4b
+
+
+
+***For a convenient overview and download list, visit our [model page for this model](https://hf.tst.eu/model#vanta-research-apollo-v1-4b-GGUF).***
+
+weighted/imatrix quants are not available (from me) at this time. If they do not show up a week or so after the static ones, I have probably not planned for them. Feel free to request them by opening a Community Discussion.
+
+## Usage
+
+If you are unsure how to use GGUF files, refer to one of [TheBloke's
+READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for
+more details, including on how to concatenate multi-part files.
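+
+For a scripted route, here is a minimal Python sketch (an illustration, not
+part of the original README) that downloads one of the quants listed in the
+table below and runs a chat completion with it. It assumes the
+`huggingface_hub` and `llama-cpp-python` packages are installed; each file in
+the table is a single-part GGUF, so no concatenation step is needed here.
+
+```python
+from huggingface_hub import hf_hub_download
+from llama_cpp import Llama
+
+# Fetch the Q4_K_M quant ("fast, recommended" in the table below).
+# Filenames match the links in the Provided Quants table.
+model_path = hf_hub_download(
+    repo_id="mradermacher/vanta-research-apollo-v1-4b-GGUF",
+    filename="vanta-research-apollo-v1-4b.Q4_K_M.gguf",
+)
+
+# Load the GGUF file; n_ctx is the context window -- adjust to your hardware.
+llm = Llama(model_path=model_path, n_ctx=4096)
+
+response = llm.create_chat_completion(
+    messages=[{"role": "user", "content": "Explain GGUF in one sentence."}]
+)
+print(response["choices"][0]["message"]["content"])
+```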
+
+## Provided Quants
+
+(sorted by size, not necessarily quality; IQ-quants are often preferable over similar-sized non-IQ quants)
+
+| Link | Type | Size/GB | Notes |
+|:-----|:-----|--------:|:------|
+| [GGUF](https://huggingface.co/mradermacher/vanta-research-apollo-v1-4b-GGUF/resolve/main/vanta-research-apollo-v1-4b.Q2_K.gguf) | Q2_K | 1.8 | |
+| [GGUF](https://huggingface.co/mradermacher/vanta-research-apollo-v1-4b-GGUF/resolve/main/vanta-research-apollo-v1-4b.Q3_K_S.gguf) | Q3_K_S | 2.0 | |
+| [GGUF](https://huggingface.co/mradermacher/vanta-research-apollo-v1-4b-GGUF/resolve/main/vanta-research-apollo-v1-4b.Q3_K_M.gguf) | Q3_K_M | 2.2 | lower quality |
+| [GGUF](https://huggingface.co/mradermacher/vanta-research-apollo-v1-4b-GGUF/resolve/main/vanta-research-apollo-v1-4b.IQ4_XS.gguf) | IQ4_XS | 2.3 | |
+| [GGUF](https://huggingface.co/mradermacher/vanta-research-apollo-v1-4b-GGUF/resolve/main/vanta-research-apollo-v1-4b.Q3_K_L.gguf) | Q3_K_L | 2.3 | |
+| [GGUF](https://huggingface.co/mradermacher/vanta-research-apollo-v1-4b-GGUF/resolve/main/vanta-research-apollo-v1-4b.Q4_K_S.gguf) | Q4_K_S | 2.4 | fast, recommended |
+| [GGUF](https://huggingface.co/mradermacher/vanta-research-apollo-v1-4b-GGUF/resolve/main/vanta-research-apollo-v1-4b.Q4_K_M.gguf) | Q4_K_M | 2.6 | fast, recommended |
+| [GGUF](https://huggingface.co/mradermacher/vanta-research-apollo-v1-4b-GGUF/resolve/main/vanta-research-apollo-v1-4b.Q5_K_S.gguf) | Q5_K_S | 2.8 | |
+| [GGUF](https://huggingface.co/mradermacher/vanta-research-apollo-v1-4b-GGUF/resolve/main/vanta-research-apollo-v1-4b.Q5_K_M.gguf) | Q5_K_M | 2.9 | |
+| [GGUF](https://huggingface.co/mradermacher/vanta-research-apollo-v1-4b-GGUF/resolve/main/vanta-research-apollo-v1-4b.Q6_K.gguf) | Q6_K | 3.3 | very good quality |
+| [GGUF](https://huggingface.co/mradermacher/vanta-research-apollo-v1-4b-GGUF/resolve/main/vanta-research-apollo-v1-4b.Q8_0.gguf) | Q8_0 | 4.2 | fast, best quality |
+| [GGUF](https://huggingface.co/mradermacher/vanta-research-apollo-v1-4b-GGUF/resolve/main/vanta-research-apollo-v1-4b.f16.gguf) | f16 | 7.8 | 16 bpw, overkill |
+
+Here is a handy graph by ikawrakow comparing some lower-quality quant
+types (lower is better):
+
+![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png)
+
+And here are Artefact2's thoughts on the matter:
+https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9
+
+## FAQ / Model Request
+
+See https://huggingface.co/mradermacher/model_requests for some answers to
+questions you might have and/or if you want some other model quantized.
+
+## Thanks
+
+I thank my company, [nethype GmbH](https://www.nethype.de/), for letting
+me use its servers and providing upgrades to my workstation to enable
+this work in my free time.