Initialize the project; model provided by the ModelHub XC community
Model: mradermacher/Gema4b-GGUF Source: Original Platform
.gitattributes (vendored, Normal file, 47 lines)
@@ -0,0 +1,47 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
Gema4b.IQ4_XS.gguf filter=lfs diff=lfs merge=lfs -text
Gema4b.Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
Gema4b.Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
Gema4b.Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
Gema4b.Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
Gema4b.Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
Gema4b.Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
Gema4b.Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
Gema4b.Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
Gema4b.Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
Gema4b.Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
Gema4b.f16.gguf filter=lfs diff=lfs merge=lfs -text
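As a quick sanity check that the patterns above actually route the GGUF weights through Git LFS, one can ask git which attributes it resolves for a tracked path. A minimal sketch, assuming a local clone of this repository and that `git` is on the PATH (the clone directory name is a hypothetical example):

```python
import subprocess

# Ask git which filter/diff/merge attributes apply to one of the GGUF files
# listed in .gitattributes above. For LFS-tracked paths all three should
# resolve to "lfs". The filename comes from this commit; the cwd is a
# hypothetical local clone path.
result = subprocess.run(
    ["git", "check-attr", "filter", "diff", "merge", "--", "Gema4b.Q4_K_M.gguf"],
    capture_output=True, text=True, check=True, cwd="Gema4b-GGUF",
)
print(result.stdout)
# Expected output, one attribute per line:
#   Gema4b.Q4_K_M.gguf: filter: lfs
#   Gema4b.Q4_K_M.gguf: diff: lfs
#   Gema4b.Q4_K_M.gguf: merge: lfs
```

If the three attributes resolve to `lfs`, the weight contents are stored as LFS objects and the repository itself only carries the small pointer files shown below.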
Gema4b.IQ4_XS.gguf (Normal file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:02c02f89e9c906225d509d9a2473a8b91e08c6f6b5540c0a82f5b928110f4dae
size 718168096
Gema4b.Q2_K.gguf (Normal file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:87d5d232c5fd1930b53024aa710a0f404b68b25ced8d5c32244b7e4b571275f9
size 689815072
Gema4b.Q3_K_L.gguf (Normal file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:82ce95eb060abd19fb6cd3cb4d93539b0cd62617d21cba0ace8c4bf01f712eab
size 751576096
Gema4b.Q3_K_M.gguf (Normal file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:60731b11971c412e8899ca2a75461acd273ed79f72e0781bebe0cb6865c59ace
size 722416672
Gema4b.Q3_K_S.gguf (Normal file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:638971d10e36790c0f0f5cacb9e77e3c95fb80c7344ecbbf0387ca0ec41491e9
size 688856608
Gema4b.Q4_K_M.gguf (Normal file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:94084aeda92acd79ea14a836634dd3bc25c1dc51bfb24e30d19d655cdf3f7bbd
size 806058784
Gema4b.Q4_K_S.gguf (Normal file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8d2f2438fd04eba9d3c6fa9df98819e014acafe815cc66c495e15a2d8f10bad9
size 780993568
Gema4b.Q5_K_M.gguf (Normal file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:deef7e7484dcedd1487dbe9bfb82605edaf55b585618447a8ec9e2cdb7d32fe9
size 851346208
Gema4b.Q5_K_S.gguf (Normal file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:127e8b983a1796f6af3e1543f72e85a0f47b1a5a577981e8dbe05ddfc90f5c91
size 836400160
Gema4b.Q6_K.gguf (Normal file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:84419a01e10cc95a430de8055061bf2a2b8311500f4f2b4e868de3f9203498c3
size 1011739168
Gema4b.Q8_0.gguf (Normal file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3b7e4e7bed3964b6d1d76739b7e743aa0130cfe2756a64dab53f583368fe9560
size 1069306912
Gema4b.f16.gguf (Normal file, 3 lines)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b0575c42274df6f3af2e850f5cde99aaff3878f9fc3633bba62f3672757b85bd
size 2006574112
README.md (Normal file, 74 lines)
@@ -0,0 +1,74 @@
---
base_model: Alamaks/Gema4b
language:
- en
library_name: transformers
license: apache-2.0
mradermacher:
  readme_rev: 1
quantized_by: mradermacher
tags:
- text-generation-inference
- transformers
- unsloth
- gemma3_text
---

## About

<!-- ### quantize_version: 2 -->
<!-- ### output_tensor_quantised: 1 -->
<!-- ### convert_type: hf -->
<!-- ### vocab_type: -->
<!-- ### tags: -->
static quants of https://huggingface.co/Alamaks/Gema4b

<!-- provided-files -->

***For a convenient overview and download list, visit our [model page for this model](https://hf.tst.eu/model#Gema4b-GGUF).***

weighted/imatrix quants do not seem to be available (from me) at this time. If they do not show up within a week or so of the static ones, I have probably not planned them. Feel free to request them by opening a Community Discussion.

## Usage

If you are unsure how to use GGUF files, refer to one of [TheBloke's
READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for
more details, including on how to concatenate multi-part files.
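As a concrete sketch of the above (not part of the original card), the single-file quants in this repo can be fetched and run with `huggingface_hub` and `llama-cpp-python`. The choice of the Q4_K_M file, the context size, and the prompt are illustrative assumptions, not recommendations from the author:

```python
from huggingface_hub import hf_hub_download  # pip install huggingface_hub
from llama_cpp import Llama                  # pip install llama-cpp-python

# Fetch one of the static quants from this repository; Q4_K_M is the
# "fast, recommended" middle ground in the table below.
model_path = hf_hub_download(
    repo_id="mradermacher/Gema4b-GGUF",
    filename="Gema4b.Q4_K_M.gguf",
)

# Load the GGUF and generate a short completion. Context size and
# token limit are arbitrary illustrative values.
llm = Llama(model_path=model_path, n_ctx=2048)
out = llm("Explain what a GGUF file is in one sentence.", max_tokens=64)
print(out["choices"][0]["text"])
```

The same pattern works for any quant in the table below; only `filename` changes.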

## Provided Quants

(sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants)

| Link | Type | Size/GB | Notes |
|:-----|:-----|--------:|:------|
| [GGUF](https://huggingface.co/mradermacher/Gema4b-GGUF/resolve/main/Gema4b.Q3_K_S.gguf) | Q3_K_S | 0.8 | |
| [GGUF](https://huggingface.co/mradermacher/Gema4b-GGUF/resolve/main/Gema4b.Q2_K.gguf) | Q2_K | 0.8 | |
| [GGUF](https://huggingface.co/mradermacher/Gema4b-GGUF/resolve/main/Gema4b.IQ4_XS.gguf) | IQ4_XS | 0.8 | |
| [GGUF](https://huggingface.co/mradermacher/Gema4b-GGUF/resolve/main/Gema4b.Q3_K_M.gguf) | Q3_K_M | 0.8 | lower quality |
| [GGUF](https://huggingface.co/mradermacher/Gema4b-GGUF/resolve/main/Gema4b.Q3_K_L.gguf) | Q3_K_L | 0.9 | |
| [GGUF](https://huggingface.co/mradermacher/Gema4b-GGUF/resolve/main/Gema4b.Q4_K_S.gguf) | Q4_K_S | 0.9 | fast, recommended |
| [GGUF](https://huggingface.co/mradermacher/Gema4b-GGUF/resolve/main/Gema4b.Q4_K_M.gguf) | Q4_K_M | 0.9 | fast, recommended |
| [GGUF](https://huggingface.co/mradermacher/Gema4b-GGUF/resolve/main/Gema4b.Q5_K_S.gguf) | Q5_K_S | 0.9 | |
| [GGUF](https://huggingface.co/mradermacher/Gema4b-GGUF/resolve/main/Gema4b.Q5_K_M.gguf) | Q5_K_M | 1.0 | |
| [GGUF](https://huggingface.co/mradermacher/Gema4b-GGUF/resolve/main/Gema4b.Q6_K.gguf) | Q6_K | 1.1 | very good quality |
| [GGUF](https://huggingface.co/mradermacher/Gema4b-GGUF/resolve/main/Gema4b.Q8_0.gguf) | Q8_0 | 1.2 | fast, best quality |
| [GGUF](https://huggingface.co/mradermacher/Gema4b-GGUF/resolve/main/Gema4b.f16.gguf) | f16 | 2.1 | 16 bpw, overkill |

Here is a handy graph by ikawrakow comparing some lower-quality quant
types (lower is better):



And here are Artefact2's thoughts on the matter:
https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9

## FAQ / Model Request

See https://huggingface.co/mradermacher/model_requests for some answers to
questions you might have and/or if you want some other model quantized.

## Thanks

I thank my company, [nethype GmbH](https://www.nethype.de/), for letting
me use its servers and providing upgrades to my workstation to enable
this work in my free time.

<!-- end -->