commit f579e0cedec97483a9c2513e2ad8df81ab8085c8
Author: ModelHub XC
Date:   Fri Apr 24 20:51:03 2026 +0800

    Initialize the project; model provided by the ModelHub XC community.

    Model: mradermacher/Gemma-2-Llama-Swallow-2b-it-v0.1-GGUF
    Source: Original Platform

diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..4f1a799
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,47 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+Gemma-2-Llama-Swallow-2b-it-v0.1.IQ4_XS.gguf filter=lfs diff=lfs merge=lfs -text
+Gemma-2-Llama-Swallow-2b-it-v0.1.Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
+Gemma-2-Llama-Swallow-2b-it-v0.1.Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+Gemma-2-Llama-Swallow-2b-it-v0.1.Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+Gemma-2-Llama-Swallow-2b-it-v0.1.Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+Gemma-2-Llama-Swallow-2b-it-v0.1.Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+Gemma-2-Llama-Swallow-2b-it-v0.1.Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+Gemma-2-Llama-Swallow-2b-it-v0.1.Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+Gemma-2-Llama-Swallow-2b-it-v0.1.Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+Gemma-2-Llama-Swallow-2b-it-v0.1.Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
+Gemma-2-Llama-Swallow-2b-it-v0.1.Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
+Gemma-2-Llama-Swallow-2b-it-v0.1.f16.gguf filter=lfs diff=lfs merge=lfs -text
diff --git a/Gemma-2-Llama-Swallow-2b-it-v0.1.IQ4_XS.gguf b/Gemma-2-Llama-Swallow-2b-it-v0.1.IQ4_XS.gguf
new file mode 100644
index 0000000..6135e68
--- /dev/null
+++ b/Gemma-2-Llama-Swallow-2b-it-v0.1.IQ4_XS.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ca4b4abe8dff310fc24bfcb0e8c6ce323ddce7ef8d9df8c7c08af323700c9463
+size 1576205760
diff --git a/Gemma-2-Llama-Swallow-2b-it-v0.1.Q2_K.gguf b/Gemma-2-Llama-Swallow-2b-it-v0.1.Q2_K.gguf
new file mode 100644
index 0000000..17663a3
--- /dev/null
+++ b/Gemma-2-Llama-Swallow-2b-it-v0.1.Q2_K.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df20485ada3eb66a2486fa89001744e5f15e680d691ad4c38a41418d2df9d306
+size 1229831616
diff --git a/Gemma-2-Llama-Swallow-2b-it-v0.1.Q3_K_L.gguf b/Gemma-2-Llama-Swallow-2b-it-v0.1.Q3_K_L.gguf
new file mode 100644
index 0000000..148cedd
--- /dev/null
+++ b/Gemma-2-Llama-Swallow-2b-it-v0.1.Q3_K_L.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f0e67fca08add70cb989dbb0570a4cb3e4c63909d89450e27c275289e7d4e621
+size 1550437824
diff --git a/Gemma-2-Llama-Swallow-2b-it-v0.1.Q3_K_M.gguf b/Gemma-2-Llama-Swallow-2b-it-v0.1.Q3_K_M.gguf
new file mode 100644
index 0000000..68ff6d3
--- /dev/null
+++ b/Gemma-2-Llama-Swallow-2b-it-v0.1.Q3_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5d746a18362f393446f7aeca2139600bff7dec739e114d34eccf34fd7633847f
+size 1461669312
diff --git a/Gemma-2-Llama-Swallow-2b-it-v0.1.Q3_K_S.gguf b/Gemma-2-Llama-Swallow-2b-it-v0.1.Q3_K_S.gguf
new file mode 100644
index 0000000..db874bd
--- /dev/null
+++ b/Gemma-2-Llama-Swallow-2b-it-v0.1.Q3_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:63994f6954bd5292c32a9e89d5a326ed55f9c1eea4c8e15461f5008962df7087
+size 1360661952
diff --git a/Gemma-2-Llama-Swallow-2b-it-v0.1.Q4_K_M.gguf b/Gemma-2-Llama-Swallow-2b-it-v0.1.Q4_K_M.gguf
new file mode 100644
index 0000000..736b6f2
--- /dev/null
+++ b/Gemma-2-Llama-Swallow-2b-it-v0.1.Q4_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c7f178b2a320a67b81c3f58da77baa8e10b0d148e4e4e11a05a32b64b069d3c6
+size 1708584384
diff --git a/Gemma-2-Llama-Swallow-2b-it-v0.1.Q4_K_S.gguf b/Gemma-2-Llama-Swallow-2b-it-v0.1.Q4_K_S.gguf
new file mode 100644
index 0000000..b4fc6d4
--- /dev/null
+++ b/Gemma-2-Llama-Swallow-2b-it-v0.1.Q4_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5a60b8cec860fcf3e86a751da9a4921590893f6c7f098fbd7d7bad1ab24fb590
+size 1638653376
diff --git a/Gemma-2-Llama-Swallow-2b-it-v0.1.Q5_K_M.gguf b/Gemma-2-Llama-Swallow-2b-it-v0.1.Q5_K_M.gguf
new file mode 100644
index 0000000..4732cbc
--- /dev/null
+++ b/Gemma-2-Llama-Swallow-2b-it-v0.1.Q5_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:21d1a966ceeec20a86b76cf0c10774dba4ea6ebdabaeb8300f7b14b9893590d1
+size 1923280320
diff --git a/Gemma-2-Llama-Swallow-2b-it-v0.1.Q5_K_S.gguf b/Gemma-2-Llama-Swallow-2b-it-v0.1.Q5_K_S.gguf
new file mode 100644
index 0000000..4b179dc
--- /dev/null
+++ b/Gemma-2-Llama-Swallow-2b-it-v0.1.Q5_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bec94accc91b6c573b9ce4669c24d34bb2ec998021f1f3deef6c7570134ebb38
+size 1882545600
diff --git a/Gemma-2-Llama-Swallow-2b-it-v0.1.Q6_K.gguf b/Gemma-2-Llama-Swallow-2b-it-v0.1.Q6_K.gguf
new file mode 100644
index 0000000..868d524
--- /dev/null
+++ b/Gemma-2-Llama-Swallow-2b-it-v0.1.Q6_K.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:901fce70457a5ad7fb804dc34bb2fa56d6ce8654cc57327c8380674fa524b713
+size 2151394752
diff --git a/Gemma-2-Llama-Swallow-2b-it-v0.1.Q8_0.gguf b/Gemma-2-Llama-Swallow-2b-it-v0.1.Q8_0.gguf
new file mode 100644
index 0000000..375bf7e
--- /dev/null
+++ b/Gemma-2-Llama-Swallow-2b-it-v0.1.Q8_0.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ddee2b3a4acce6909837af7db1e97470d4d103ad4be8a972b2c94756f141a70d
+size 2784497088
diff --git a/Gemma-2-Llama-Swallow-2b-it-v0.1.f16.gguf b/Gemma-2-Llama-Swallow-2b-it-v0.1.f16.gguf
new file mode 100644
index 0000000..79bd914
--- /dev/null
+++ b/Gemma-2-Llama-Swallow-2b-it-v0.1.f16.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a1f3c82b6e05a06c1ee93e9bef9c847fa4cda212e760d2dbdb4b145320312b18
+size 5235215808
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..7afe464
--- /dev/null
+++ b/README.md
@@ -0,0 +1,73 @@
+---
+base_model: tokyotech-llm/Gemma-2-Llama-Swallow-2b-it-v0.1
+datasets:
+- tokyotech-llm/lmsys-chat-1m-synth
+- tokyotech-llm/swallow-magpie-ultra-v0.1
+- tokyotech-llm/swallow-gemma-magpie-v0.1
+- lmsys/lmsys-chat-1m
+- argilla/magpie-ultra-v0.1
+language:
+- en
+- ja
+library_name: transformers
+license:
+- gemma
+- llama3.3
+quantized_by: mradermacher
+---
+## About
+
+static quants of https://huggingface.co/tokyotech-llm/Gemma-2-Llama-Swallow-2b-it-v0.1
+
+weighted/imatrix quants are available at https://huggingface.co/mradermacher/Gemma-2-Llama-Swallow-2b-it-v0.1-i1-GGUF
+
+## Usage
+
+If you are unsure how to use GGUF files, refer to one of [TheBloke's
+READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for
+more details, including on how to concatenate multi-part files.
+
+## Provided Quants
+
+(sorted by size, not necessarily quality. IQ-quants are often preferable over similar-sized non-IQ quants)
+
+| Link | Type | Size/GB | Notes |
+|:-----|:-----|--------:|:------|
+| [GGUF](https://huggingface.co/mradermacher/Gemma-2-Llama-Swallow-2b-it-v0.1-GGUF/resolve/main/Gemma-2-Llama-Swallow-2b-it-v0.1.Q2_K.gguf) | Q2_K | 1.3 | |
+| [GGUF](https://huggingface.co/mradermacher/Gemma-2-Llama-Swallow-2b-it-v0.1-GGUF/resolve/main/Gemma-2-Llama-Swallow-2b-it-v0.1.Q3_K_S.gguf) | Q3_K_S | 1.5 | |
+| [GGUF](https://huggingface.co/mradermacher/Gemma-2-Llama-Swallow-2b-it-v0.1-GGUF/resolve/main/Gemma-2-Llama-Swallow-2b-it-v0.1.Q3_K_M.gguf) | Q3_K_M | 1.6 | lower quality |
+| [GGUF](https://huggingface.co/mradermacher/Gemma-2-Llama-Swallow-2b-it-v0.1-GGUF/resolve/main/Gemma-2-Llama-Swallow-2b-it-v0.1.Q3_K_L.gguf) | Q3_K_L | 1.7 | |
+| [GGUF](https://huggingface.co/mradermacher/Gemma-2-Llama-Swallow-2b-it-v0.1-GGUF/resolve/main/Gemma-2-Llama-Swallow-2b-it-v0.1.IQ4_XS.gguf) | IQ4_XS | 1.7 | |
+| [GGUF](https://huggingface.co/mradermacher/Gemma-2-Llama-Swallow-2b-it-v0.1-GGUF/resolve/main/Gemma-2-Llama-Swallow-2b-it-v0.1.Q4_K_S.gguf) | Q4_K_S | 1.7 | fast, recommended |
+| [GGUF](https://huggingface.co/mradermacher/Gemma-2-Llama-Swallow-2b-it-v0.1-GGUF/resolve/main/Gemma-2-Llama-Swallow-2b-it-v0.1.Q4_K_M.gguf) | Q4_K_M | 1.8 | fast, recommended |
+| [GGUF](https://huggingface.co/mradermacher/Gemma-2-Llama-Swallow-2b-it-v0.1-GGUF/resolve/main/Gemma-2-Llama-Swallow-2b-it-v0.1.Q5_K_S.gguf) | Q5_K_S | 2.0 | |
+| [GGUF](https://huggingface.co/mradermacher/Gemma-2-Llama-Swallow-2b-it-v0.1-GGUF/resolve/main/Gemma-2-Llama-Swallow-2b-it-v0.1.Q5_K_M.gguf) | Q5_K_M | 2.0 | |
+| [GGUF](https://huggingface.co/mradermacher/Gemma-2-Llama-Swallow-2b-it-v0.1-GGUF/resolve/main/Gemma-2-Llama-Swallow-2b-it-v0.1.Q6_K.gguf) | Q6_K | 2.3 | very good quality |
+| [GGUF](https://huggingface.co/mradermacher/Gemma-2-Llama-Swallow-2b-it-v0.1-GGUF/resolve/main/Gemma-2-Llama-Swallow-2b-it-v0.1.Q8_0.gguf) | Q8_0 | 2.9 | fast, best quality |
+| [GGUF](https://huggingface.co/mradermacher/Gemma-2-Llama-Swallow-2b-it-v0.1-GGUF/resolve/main/Gemma-2-Llama-Swallow-2b-it-v0.1.f16.gguf) | f16 | 5.3 | 16 bpw, overkill |
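+
+If you want a quick way to try one of the quants above, here is a minimal
+sketch using the Python packages `huggingface_hub` and `llama-cpp-python`
+(neither is part of this repo; they are just one common way to consume GGUF
+files, and the file name, checksum, and size below are taken from this repo's
+LFS pointer for the Q4_K_M quant):
+
+```python
+# Sketch: download the Q4_K_M quant, check it against the sha256/size recorded
+# in the LFS pointer, then run a short chat completion.
+import hashlib
+
+from huggingface_hub import hf_hub_download
+from llama_cpp import Llama
+
+REPO_ID = "mradermacher/Gemma-2-Llama-Swallow-2b-it-v0.1-GGUF"
+FILENAME = "Gemma-2-Llama-Swallow-2b-it-v0.1.Q4_K_M.gguf"
+EXPECTED_SHA256 = "c7f178b2a320a67b81c3f58da77baa8e10b0d148e4e4e11a05a32b64b069d3c6"
+EXPECTED_SIZE = 1708584384  # bytes, from the LFS pointer
+
+path = hf_hub_download(repo_id=REPO_ID, filename=FILENAME)
+
+# Verify the download byte-for-byte before loading it.
+sha, size = hashlib.sha256(), 0
+with open(path, "rb") as f:
+    for chunk in iter(lambda: f.read(1 << 20), b""):
+        sha.update(chunk)
+        size += len(chunk)
+assert sha.hexdigest() == EXPECTED_SHA256 and size == EXPECTED_SIZE
+
+# llama-cpp-python uses the chat template embedded in the GGUF metadata, if present.
+llm = Llama(model_path=path, n_ctx=4096)
+out = llm.create_chat_completion(
+    messages=[{"role": "user", "content": "日本の首都はどこですか?"}],
+    max_tokens=128,
+)
+print(out["choices"][0]["message"]["content"])
+```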
+
+Here is a handy graph by ikawrakow comparing some lower-quality quant
+types (lower is better):
+
+![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png)
+
+And here are Artefact2's thoughts on the matter:
+https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9
+
+## FAQ / Model Request
+
+See https://huggingface.co/mradermacher/model_requests for some answers to
+questions you might have and/or if you want some other model quantized.
+
+## Thanks
+
+I thank my company, [nethype GmbH](https://www.nethype.de/), for letting
+me use its servers and providing upgrades to my workstation to enable
+this work in my free time.