From 4c821857dd8953c7810cd9bb3c3ed8f54752bd28 Mon Sep 17 00:00:00 2001
From: ModelHub XC
Date: Tue, 5 May 2026 13:42:21 +0800
Subject: [PATCH] Initialize project; model provided by the ModelHub XC
 community
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Model: hoangton/gemma-2b-GGUF
Source: Original Platform
---
 .gitattributes       | 49 ++++++++++++++++++++++++++++++++++++++++++++
 README.md            | 41 ++++++++++++++++++++++++++++++++++++
 config.json          | 27 ++++++++++++++++++++++++
 gemma-2b.Q2_K.gguf   |  3 +++
 gemma-2b.Q3_K_L.gguf |  3 +++
 gemma-2b.Q3_K_M.gguf |  3 +++
 gemma-2b.Q3_K_S.gguf |  3 +++
 gemma-2b.Q4_0.gguf   |  3 +++
 gemma-2b.Q4_1.gguf   |  3 +++
 gemma-2b.Q4_K_M.gguf |  3 +++
 gemma-2b.Q4_K_S.gguf |  3 +++
 gemma-2b.Q5_0.gguf   |  3 +++
 gemma-2b.Q5_1.gguf   |  3 +++
 gemma-2b.Q5_K_M.gguf |  3 +++
 gemma-2b.Q5_K_S.gguf |  3 +++
 gemma-2b.Q6_K.gguf   |  3 +++
 gemma-2b.Q8_0.gguf   |  3 +++
 17 files changed, 159 insertions(+)
 create mode 100644 .gitattributes
 create mode 100644 README.md
 create mode 100644 config.json
 create mode 100644 gemma-2b.Q2_K.gguf
 create mode 100644 gemma-2b.Q3_K_L.gguf
 create mode 100644 gemma-2b.Q3_K_M.gguf
 create mode 100644 gemma-2b.Q3_K_S.gguf
 create mode 100644 gemma-2b.Q4_0.gguf
 create mode 100644 gemma-2b.Q4_1.gguf
 create mode 100644 gemma-2b.Q4_K_M.gguf
 create mode 100644 gemma-2b.Q4_K_S.gguf
 create mode 100644 gemma-2b.Q5_0.gguf
 create mode 100644 gemma-2b.Q5_1.gguf
 create mode 100644 gemma-2b.Q5_K_M.gguf
 create mode 100644 gemma-2b.Q5_K_S.gguf
 create mode 100644 gemma-2b.Q6_K.gguf
 create mode 100644 gemma-2b.Q8_0.gguf

diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..253b4db
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,49 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+gemma-2b.Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2b.Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2b.Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2b.Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2b.Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2b.Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2b.Q4_1.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2b.Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2b.Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2b.Q5_0.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2b.Q5_1.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2b.Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2b.Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2b.Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..e86e5df
--- /dev/null
+++ b/README.md
@@ -0,0 +1,41 @@
+---
+library_name: transformers
+tags: []
+extra_gated_heading: "Access Gemma on Hugging Face"
+extra_gated_prompt: "To access Gemma on Hugging Face, you’re required to review and agree to Google’s usage license. To do this, please ensure you’re logged-in to Hugging Face and click below. Requests are processed immediately."
+extra_gated_button_content: "Acknowledge license"
+license: other
+license_name: gemma-terms-of-use
+license_link: https://ai.google.dev/gemma/terms
+---
+
+# Gemma-2B GGUF
+
+This is a quantized version of the [google/gemma-2b](https://huggingface.co/google/gemma-2b) model, created with [llama.cpp](https://github.com/ggerganov/llama.cpp).
+
+This model card corresponds to the 2B base version of the Gemma model. You can also visit the model cards of the [7B base model](https://huggingface.co/google/gemma-7b), the [7B instruct model](https://huggingface.co/google/gemma-7b-it), and the [2B instruct model](https://huggingface.co/google/gemma-2b-it).
+
+**Model Page**: [Gemma](https://ai.google.dev/gemma/docs)
+
+**Terms of Use**: [Terms](https://www.kaggle.com/models/google/gemma/license/consent)
+
+## ⚡ Quants
+
+* `q2_k`: Uses Q4_K for the attention.wv and feed_forward.w2 tensors, and Q2_K for the other tensors.
+* `q3_k_l`: Uses Q5_K for the attention.wv, attention.wo, and feed_forward.w2 tensors, and Q3_K elsewhere.
+* `q3_k_m`: Uses Q4_K for the attention.wv, attention.wo, and feed_forward.w2 tensors, and Q3_K elsewhere.
+* `q3_k_s`: Uses Q3_K for all tensors.
+* `q4_0`: Original quant method, 4-bit.
+* `q4_1`: Higher accuracy than q4_0 but not as high as q5_0; however, it offers quicker inference than the q5 quants.
+* `q4_k_m`: Uses Q6_K for half of the attention.wv and feed_forward.w2 tensors, and Q4_K elsewhere.
+* `q4_k_s`: Uses Q4_K for all tensors.
+* `q5_0`: Higher accuracy, higher resource usage, and slower inference.
+* `q5_1`: Even higher accuracy and resource usage, and slower inference still.
+* `q5_k_m`: Uses Q6_K for half of the attention.wv and feed_forward.w2 tensors, and Q5_K elsewhere.
+* `q5_k_s`: Uses Q5_K for all tensors.
+* `q6_k`: Uses Q8_K for all tensors.
+* `q8_0`: Almost indistinguishable from float16. High resource use and slow; not recommended for most users.
+
+## 💻 Usage
+
+This model can be used with recent versions of llama.cpp and with LM Studio versions newer than 0.2.16 (see the loading sketch after the patch).
\ No newline at end of file
diff --git a/config.json b/config.json
new file mode 100644
index 0000000..6f50d29
--- /dev/null
+++ b/config.json
@@ -0,0 +1,27 @@
+{
+  "architectures": [
+    "GemmaForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "bos_token_id": 2,
+  "eos_token_id": 1,
+  "head_dim": 256,
+  "hidden_act": "gelu",
+  "hidden_size": 3072,
+  "initializer_range": 0.02,
+  "intermediate_size": 24576,
+  "max_position_embeddings": 8192,
+  "model_type": "gemma",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 28,
+  "num_key_value_heads": 16,
+  "pad_token_id": 0,
+  "rms_norm_eps": 1e-06,
+  "rope_scaling": null,
+  "rope_theta": 10000.0,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.38.0.dev0",
+  "use_cache": true,
+  "vocab_size": 256000
+}
diff --git a/gemma-2b.Q2_K.gguf b/gemma-2b.Q2_K.gguf
new file mode 100644
index 0000000..8415258
--- /dev/null
+++ b/gemma-2b.Q2_K.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2e987bf77ffff2d873e5b06310dfe8847f03178cdd27812c84f95d8c5c488c59
+size 899875936
diff --git a/gemma-2b.Q3_K_L.gguf b/gemma-2b.Q3_K_L.gguf
new file mode 100644
index 0000000..395d395
--- /dev/null
+++ b/gemma-2b.Q3_K_L.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f2a686bdf24e52055ee55b323ddd2a077d99bdb370db1d114d96415227e67a2
+size 1260790880
diff --git a/gemma-2b.Q3_K_M.gguf b/gemma-2b.Q3_K_M.gguf
new file mode 100644
index 0000000..c0f47ff
--- /dev/null
+++ b/gemma-2b.Q3_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a3711a880951eba56dc201e0160154cff115f9a96cf0b56a8041d7136daa55f2
+size 1179001952
diff --git a/gemma-2b.Q3_K_S.gguf b/gemma-2b.Q3_K_S.gguf
new file mode 100644
index 0000000..a3925ff
--- /dev/null
+++ b/gemma-2b.Q3_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a5ccf460c7491354923e8be50978b962e3c8e742df0d7e9466578dccf95bfdbd
+size 1083180128
diff --git a/gemma-2b.Q4_0.gguf b/gemma-2b.Q4_0.gguf
new file mode 100644
index 0000000..38fe5d5
--- /dev/null
+++ b/gemma-2b.Q4_0.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4acaf847c0c4b51888f4ba9362e031216a07629c1cbf30d5db89d460719e810e
+size 1416021088
diff --git a/gemma-2b.Q4_1.gguf b/gemma-2b.Q4_1.gguf
new file mode 100644
index 0000000..664877e
--- /dev/null
+++ b/gemma-2b.Q4_1.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:16a58d042d6eda5436f47c4ca125cba401c7b5ec67ec4194bbbfaa473fa76295
+size 1572652128
diff --git a/gemma-2b.Q4_K_M.gguf b/gemma-2b.Q4_K_M.gguf
new file mode 100644
index 0000000..4195d59
--- /dev/null
+++ b/gemma-2b.Q4_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:37d50c21ef7847926204ad9b3007127d9a2722188cfd240ce7f9f7f041aa71a5
+size 1495094368
diff --git a/gemma-2b.Q4_K_S.gguf b/gemma-2b.Q4_K_S.gguf
new file mode 100644
index 0000000..b83c90e
--- /dev/null
+++ b/gemma-2b.Q4_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:588b26a208ef99d7cefa7137ef074257e51250c7096cecad51349fdc941647ac
+size 1424671840
diff --git a/gemma-2b.Q5_0.gguf b/gemma-2b.Q5_0.gguf
new file mode 100644
index 0000000..0802508
--- /dev/null
+++ b/gemma-2b.Q5_0.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4164b683fddc18bdc78d355f49264239a045ad5a54f8deb5e4b1d07d3498d13a
+size 1729283168
diff --git a/gemma-2b.Q5_1.gguf b/gemma-2b.Q5_1.gguf
new file mode 100644
index 0000000..85e2202
--- /dev/null
+++ b/gemma-2b.Q5_1.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:97498ad926467c426d2ee87da99b54afd840e5dec6b65fa3b1c9de2f7f5d6b5a
+size 1885914208
diff --git a/gemma-2b.Q5_K_M.gguf b/gemma-2b.Q5_K_M.gguf
new file mode 100644
index 0000000..c21a370
--- /dev/null
+++ b/gemma-2b.Q5_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:226f65d1a2463a35baad4c94af0d5deb4df29ea2a15c208de00be80989f508eb
+size 1770017888
diff --git a/gemma-2b.Q5_K_S.gguf b/gemma-2b.Q5_K_S.gguf
new file mode 100644
index 0000000..48a1251
--- /dev/null
+++ b/gemma-2b.Q5_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ee20c0c2046d06ab9c4447fa3abaa880bfbaa1816de483ded5f1a060edfbf9be
+size 1729283168
diff --git a/gemma-2b.Q6_K.gguf b/gemma-2b.Q6_K.gguf
new file mode 100644
index 0000000..bc4f6a0
--- /dev/null
+++ b/gemma-2b.Q6_K.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:29f42096801c3c6b1537e60f9416abfd103d5c4a37312ac437170485d61a4375
+size 2062124128
diff --git a/gemma-2b.Q8_0.gguf b/gemma-2b.Q8_0.gguf
new file mode 100644
index 0000000..d1ced2f
--- /dev/null
+++ b/gemma-2b.Q8_0.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0ef61ca7b06ea0afba21ee65922e066f7ae8e1a35bb151cf41846251bfb1a5a8
+size 2669069408
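
The Usage section of the README above names only llama.cpp and LM Studio. As a minimal sketch (not part of the patch itself), the same GGUF files can also be loaded through the llama-cpp-python bindings; the package choice, the `n_ctx` value, and the pick of the Q4_K_M file are illustrative assumptions, not something this model card prescribes.

```python
# Sketch only: load one of the quantized files added by this patch with
# llama-cpp-python (assumes `pip install llama-cpp-python` and that the
# .gguf file has been fetched from Git LFS rather than left as a pointer).
from llama_cpp import Llama

llm = Llama(
    model_path="gemma-2b.Q4_K_M.gguf",  # any of the 14 .gguf files in this repo works
    n_ctx=8192,                         # matches max_position_embeddings in config.json
)

# Gemma-2B here is the base (non-instruct) model, so use plain text completion.
output = llm("The capital of France is", max_tokens=32)
print(output["choices"][0]["text"])
```

Any of the fourteen `.gguf` files added by this patch can be substituted for `model_path`; the lower-bit quants trade accuracy for a smaller disk and memory footprint, as described in the Quants list.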