Update README.md

ai-modelscope
2024-06-28 09:23:43 +08:00
parent b5dde03514
commit 2163605abc
26 changed files with 205 additions and 54 deletions

.gitattributes vendored

@@ -1,37 +1,58 @@
 *.7z filter=lfs diff=lfs merge=lfs -text
 *.arrow filter=lfs diff=lfs merge=lfs -text
 *.bin filter=lfs diff=lfs merge=lfs -text
-*.bin.* filter=lfs diff=lfs merge=lfs -text
 *.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
 *.ftz filter=lfs diff=lfs merge=lfs -text
 *.gz filter=lfs diff=lfs merge=lfs -text
 *.h5 filter=lfs diff=lfs merge=lfs -text
 *.joblib filter=lfs diff=lfs merge=lfs -text
 *.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
 *.model filter=lfs diff=lfs merge=lfs -text
 *.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
 *.onnx filter=lfs diff=lfs merge=lfs -text
 *.ot filter=lfs diff=lfs merge=lfs -text
 *.parquet filter=lfs diff=lfs merge=lfs -text
 *.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
 *.pt filter=lfs diff=lfs merge=lfs -text
 *.pth filter=lfs diff=lfs merge=lfs -text
 *.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
 saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
 *.tflite filter=lfs diff=lfs merge=lfs -text
 *.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
 *.xz filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
-*.zstandard filter=lfs diff=lfs merge=lfs -text
-*.tfevents* filter=lfs diff=lfs merge=lfs -text
-*.db* filter=lfs diff=lfs merge=lfs -text
-*.ark* filter=lfs diff=lfs merge=lfs -text
-**/*ckpt*data* filter=lfs diff=lfs merge=lfs -text
-**/*ckpt*.meta filter=lfs diff=lfs merge=lfs -text
-**/*ckpt*.index filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.gguf* filter=lfs diff=lfs merge=lfs -text
-*.ggml filter=lfs diff=lfs merge=lfs -text
-*.llamafile* filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+gemma-2-9b-it-IQ2_M.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2-9b-it-IQ2_S.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2-9b-it-IQ2_XS.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2-9b-it-IQ3_M.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2-9b-it-IQ3_XS.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2-9b-it-IQ3_XXS.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2-9b-it-IQ4_XS.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2-9b-it-Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2-9b-it-Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2-9b-it-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2-9b-it-Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2-9b-it-Q4_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2-9b-it-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2-9b-it-Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2-9b-it-Q5_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2-9b-it-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2-9b-it-Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2-9b-it-Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2-9b-it-Q6_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2-9b-it-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2-9b-it-Q8_0_L.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2-9b-it-f32.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2-9b-it.imatrix filter=lfs diff=lfs merge=lfs -text

README.md

@@ -1,47 +1,107 @@
 ---
-license: Apache License 2.0
-#model-type:
-## e.g. gpt, phi, llama, chatglm, baichuan
-#- gpt
-#domain:
-## e.g. nlp, cv, audio, multi-modal
-#- nlp
-#language:
-## list of language codes: https://help.aliyun.com/document_detail/215387.html?spm=a2c4g.11186623.0.0.9f8d7467kni6Aa
-#- cn
-#metrics:
-## e.g. CIDEr, BLEU, ROUGE
-#- CIDEr
-#tags:
-## any custom tags, e.g. training methods such as pretrained, fine-tuned, instruction-tuned, RL-tuned
-#- pretrained
-#tools:
-## e.g. vllm, fastchat, llamacpp, AdaSeq
-#- vllm
+license: gemma
+library_name: transformers
+pipeline_tag: text-generation
+extra_gated_heading: Access Gemma on Hugging Face
+extra_gated_prompt: >-
+  To access Gemma on Hugging Face, you're required to review and agree to
+  Google's usage license. To do this, please ensure you're logged in to Hugging
+  Face and click below. Requests are processed immediately.
+extra_gated_button_content: Acknowledge license
+tags:
+- conversational
+quantized_by: bartowski
 ---
-### The contributors of this model have not provided a more detailed model introduction. Model files and weights can be found on the "Model files" page.
-#### You can download the model with the following git clone command or via the ModelScope SDK
-SDK download
-```bash
-# Install ModelScope
-pip install modelscope
-```
-```python
-# Download the model via the SDK
-from modelscope import snapshot_download
-model_dir = snapshot_download('LLM-Research/gemma-2-9b-it-GGUF')
-```
-Git download
-```
-# Download the model via git
-git clone https://www.modelscope.cn/LLM-Research/gemma-2-9b-it-GGUF.git
-```
-<p style="color: lightgrey;">If you are a contributor to this model, we invite you to complete the model card promptly according to the <a href="https://modelscope.cn/docs/ModelScope%E6%A8%A1%E5%9E%8B%E6%8E%A5%E5%85%A5%E6%B5%81%E7%A8%8B%E6%A6%82%E8%A7%88" style="color: lightgrey; text-decoration: underline;">model contribution documentation</a>.</p>
+
+## Llamacpp imatrix Quantizations of gemma-2-9b-it
+
+Using <a href="https://github.com/ggerganov/llama.cpp/">llama.cpp</a> PR <a href="https://github.com/ggerganov/llama.cpp/pull/8156">8156</a> for quantization.
+
+Original model: https://huggingface.co/google/gemma-2-9b-it
+
+All quants made using imatrix option with dataset from [here](https://gist.github.com/bartowski1182/eb213dccb3571f863da82e99418f81e8)
+
+## Prompt format
+
+```
+<bos><start_of_turn>user
+{prompt}<end_of_turn>
+<start_of_turn>model
+```
+
+Note that this model does not support a System prompt.
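
As a quick illustration (not part of the original card), the turn template above is just string concatenation; here is a minimal Python sketch using a hypothetical `build_prompt` helper:

```python
# Minimal sketch of the Gemma-2 turn format shown above.
# build_prompt is a hypothetical helper, not part of llama.cpp or this repo.
def build_prompt(user_message: str) -> str:
    return (
        "<bos><start_of_turn>user\n"
        f"{user_message}<end_of_turn>\n"
        "<start_of_turn>model\n"
    )

print(build_prompt("Why is the sky blue?"))
```

Since there is no System prompt, any instructions have to go inside the user turn itself.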
## Download a file (not the whole branch) from below:
| Filename | Quant type | File Size | Description |
| -------- | ---------- | --------- | ----------- |
| [gemma-2-9b-it-Q8_0_L.gguf](https://huggingface.co/bartowski/gemma-2-9b-it-GGUF/blob/main/gemma-2-9b-it-Q8_0_L.gguf) | Q8_0_L | 10.68GB | *Experimental*, uses f16 for embed and output weights. Please provide feedback on any differences. Extremely high quality, generally unneeded but max available quant. |
| [gemma-2-9b-it-Q8_0.gguf](https://huggingface.co/bartowski/gemma-2-9b-it-GGUF/blob/main/gemma-2-9b-it-Q8_0.gguf) | Q8_0 | 9.82GB | Extremely high quality, generally unneeded but max available quant. |
| [gemma-2-9b-it-Q6_K_L.gguf](https://huggingface.co/bartowski/gemma-2-9b-it-GGUF/blob/main/gemma-2-9b-it-Q6_K_L.gguf) | Q6_K_L | 8.67GB | *Experimental*, uses f16 for embed and output weights. Please provide feedback on any differences. Very high quality, near perfect, *recommended*. |
| [gemma-2-9b-it-Q6_K.gguf](https://huggingface.co/bartowski/gemma-2-9b-it-GGUF/blob/main/gemma-2-9b-it-Q6_K.gguf) | Q6_K | 7.58GB | Very high quality, near perfect, *recommended*. |
| [gemma-2-9b-it-Q5_K_L.gguf](https://huggingface.co/bartowski/gemma-2-9b-it-GGUF/blob/main/gemma-2-9b-it-Q5_K_L.gguf) | Q5_K_L | 7.73GB | *Experimental*, uses f16 for embed and output weights. Please provide feedback on any differences. High quality, *recommended*. |
| [gemma-2-9b-it-Q5_K_M.gguf](https://huggingface.co/bartowski/gemma-2-9b-it-GGUF/blob/main/gemma-2-9b-it-Q5_K_M.gguf) | Q5_K_M | 6.64GB | High quality, *recommended*. |
| [gemma-2-9b-it-Q5_K_S.gguf](https://huggingface.co/bartowski/gemma-2-9b-it-GGUF/blob/main/gemma-2-9b-it-Q5_K_S.gguf) | Q5_K_S | 6.48GB | High quality, *recommended*. |
| [gemma-2-9b-it-Q4_K_L.gguf](https://huggingface.co/bartowski/gemma-2-9b-it-GGUF/blob/main/gemma-2-9b-it-Q4_K_L.gguf) | Q4_K_L | 6.84GB | *Experimental*, uses f16 for embed and output weights. Please provide feedback on any differences. Good quality, uses about 4.83 bits per weight, *recommended*. |
| [gemma-2-9b-it-Q4_K_M.gguf](https://huggingface.co/bartowski/gemma-2-9b-it-GGUF/blob/main/gemma-2-9b-it-Q4_K_M.gguf) | Q4_K_M | 5.76GB | Good quality, uses about 4.83 bits per weight, *recommended*. |
| [gemma-2-9b-it-Q4_K_S.gguf](https://huggingface.co/bartowski/gemma-2-9b-it-GGUF/blob/main/gemma-2-9b-it-Q4_K_S.gguf) | Q4_K_S | 5.47GB | Slightly lower quality with more space savings, *recommended*. |
| [gemma-2-9b-it-IQ4_XS.gguf](https://huggingface.co/bartowski/gemma-2-9b-it-GGUF/blob/main/gemma-2-9b-it-IQ4_XS.gguf) | IQ4_XS | 5.18GB | Decent quality, smaller than Q4_K_S with similar performance, *recommended*. |
| [gemma-2-9b-it-Q3_K_L.gguf](https://huggingface.co/bartowski/gemma-2-9b-it-GGUF/blob/main/gemma-2-9b-it-Q3_K_L.gguf) | Q3_K_L | 5.13GB | Lower quality but usable, good for low RAM availability. |
| [gemma-2-9b-it-Q3_K_M.gguf](https://huggingface.co/bartowski/gemma-2-9b-it-GGUF/blob/main/gemma-2-9b-it-Q3_K_M.gguf) | Q3_K_M | 4.76GB | Even lower quality. |
| [gemma-2-9b-it-IQ3_M.gguf](https://huggingface.co/bartowski/gemma-2-9b-it-GGUF/blob/main/gemma-2-9b-it-IQ3_M.gguf) | IQ3_M | 4.49GB | Medium-low quality, new method with decent performance comparable to Q3_K_M. |
| [gemma-2-9b-it-Q3_K_S.gguf](https://huggingface.co/bartowski/gemma-2-9b-it-GGUF/blob/main/gemma-2-9b-it-Q3_K_S.gguf) | Q3_K_S | 4.33GB | Low quality, not recommended. |
| [gemma-2-9b-it-IQ3_XS.gguf](https://huggingface.co/bartowski/gemma-2-9b-it-GGUF/blob/main/gemma-2-9b-it-IQ3_XS.gguf) | IQ3_XS | 4.14GB | Lower quality, new method with decent performance, slightly better than Q3_K_S. |
| [gemma-2-9b-it-IQ3_XXS.gguf](https://huggingface.co/bartowski/gemma-2-9b-it-GGUF/blob/main/gemma-2-9b-it-IQ3_XXS.gguf) | IQ3_XXS | 3.79GB | Lower quality, new method with decent performance, comparable to Q3 quants. |
| [gemma-2-9b-it-Q2_K.gguf](https://huggingface.co/bartowski/gemma-2-9b-it-GGUF/blob/main/gemma-2-9b-it-Q2_K.gguf) | Q2_K | 3.80GB | Very low quality but surprisingly usable. |
| [gemma-2-9b-it-IQ2_M.gguf](https://huggingface.co/bartowski/gemma-2-9b-it-GGUF/blob/main/gemma-2-9b-it-IQ2_M.gguf) | IQ2_M | 3.43GB | Very low quality, uses SOTA techniques to also be surprisingly usable. |
| [gemma-2-9b-it-IQ2_S.gguf](https://huggingface.co/bartowski/gemma-2-9b-it-GGUF/blob/main/gemma-2-9b-it-IQ2_S.gguf) | IQ2_S | 3.21GB | Very low quality, uses SOTA techniques to be usable. |
| [gemma-2-9b-it-IQ2_XS.gguf](https://huggingface.co/bartowski/gemma-2-9b-it-GGUF/blob/main/gemma-2-9b-it-IQ2_XS.gguf) | IQ2_XS | 3.06GB | Very low quality, uses SOTA techniques to be usable. |
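
Once you have picked and downloaded a quant, one way to run it locally is through llama-cpp-python; this is a sketch under the assumption that you have installed that package separately (`pip install llama-cpp-python`), not a workflow from the original card:

```python
# Sketch: load a quant from the table above with llama-cpp-python
# (assumed installed); adjust model_path to the file you downloaded.
from llama_cpp import Llama

llm = Llama(
    model_path="./gemma-2-9b-it-Q4_K_M.gguf",  # any quant from the table
    n_ctx=4096,         # context window
    n_gpu_layers=-1,    # offload all layers if built with GPU support
)

# Gemma-2 has no System prompt, so everything goes in the user turn.
prompt = (
    "<bos><start_of_turn>user\n"
    "Why is the sky blue?<end_of_turn>\n"
    "<start_of_turn>model\n"
)
out = llm(prompt, max_tokens=256, stop=["<end_of_turn>"])
print(out["choices"][0]["text"])
```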
## Downloading using huggingface-cli
First, make sure you have huggingface-cli installed:
```
pip install -U "huggingface_hub[cli]"
```
Then, you can target the specific file you want:
```
huggingface-cli download bartowski/gemma-2-9b-it-GGUF --include "gemma-2-9b-it-Q4_K_M.gguf" --local-dir ./
```
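The same single-file download can be done from Python with the `huggingface_hub` API that ships with the package installed above; a small sketch:

```python
# Python equivalent of the huggingface-cli call above.
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="bartowski/gemma-2-9b-it-GGUF",
    filename="gemma-2-9b-it-Q4_K_M.gguf",
    local_dir=".",
)
print(path)  # local path of the downloaded GGUF file
```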
If the model is bigger than 50GB, it will have been split into multiple files. In order to download them all to a local folder, run:
```
huggingface-cli download bartowski/gemma-2-9b-it-GGUF --include "gemma-2-9b-it-Q8_0.gguf/*" --local-dir gemma-2-9b-it-Q8_0
```
You can either specify a new local-dir (gemma-2-9b-it-Q8_0) or download them all in place (./).
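For split files, the Python-side equivalent is `snapshot_download` with an `allow_patterns` filter matching the `--include` pattern above; again a sketch, not part of the original card:

```python
# Download every piece of a split quant into one folder.
from huggingface_hub import snapshot_download

snapshot_download(
    repo_id="bartowski/gemma-2-9b-it-GGUF",
    allow_patterns=["gemma-2-9b-it-Q8_0.gguf/*"],
    local_dir="gemma-2-9b-it-Q8_0",
)
```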
## Which file should I choose?
A great write-up with charts comparing the performance of the various quants is provided by Artefact2 [here](https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9).
The first thing to figure out is how big a model you can run. To do that, you'll need to know how much RAM and/or VRAM you have.
If you want your model running as FAST as possible, you'll want to fit the whole thing on your GPU's VRAM. Aim for a quant with a file size 1-2GB smaller than your GPU's total VRAM.
If you want the absolute maximum quality, add both your system RAM and your GPU's VRAM together, then similarly grab a quant with a file size 1-2GB smaller than that total.
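To make that sizing rule concrete, here is a small illustrative sketch (the `pick_quant` helper and the 1.5GB headroom figure are assumptions; sizes are copied from the table above):

```python
# Pick the largest quant that fits in memory with some headroom.
QUANT_SIZES_GB = {
    "Q8_0": 9.82, "Q6_K": 7.58, "Q5_K_M": 6.64, "Q4_K_M": 5.76,
    "IQ4_XS": 5.18, "Q3_K_M": 4.76, "IQ3_M": 4.49, "Q2_K": 3.80,
}

def pick_quant(memory_gb: float, headroom_gb: float = 1.5) -> str:
    fitting = {name: size for name, size in QUANT_SIZES_GB.items()
               if size <= memory_gb - headroom_gb}
    return max(fitting, key=fitting.get) if fitting else "nothing fits"

print(pick_quant(8.0))   # 8GB of VRAM -> Q4_K_M (5.76GB)
print(pick_quant(24.0))  # 24GB of VRAM -> Q8_0 (9.82GB)
```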
Next, you'll need to decide if you want to use an 'I-quant' or a 'K-quant'.
If you don't want to think too much, grab one of the K-quants. These are in format 'QX_K_X', like Q5_K_M.
If you want to get more into the weeds, you can check out this extremely useful feature chart:
[llama.cpp feature matrix](https://github.com/ggerganov/llama.cpp/wiki/Feature-matrix)
But basically, if you're aiming for below Q4, and you're running cuBLAS (Nvidia) or rocBLAS (AMD), you should look towards the I-quants. These are in format IQX_X, like IQ3_M. These are newer and offer better performance for their size.
These I-quants can also be used on CPU and Apple Metal, but will be slower than their K-quant equivalents, so you'll have to weigh speed against performance.
The I-quants are *not* compatible with Vulkan, which also targets AMD, so if you have an AMD card double check whether you're using the rocBLAS build or the Vulkan build. At the time of writing this, LM Studio has a preview with ROCm support, and other inference engines have specific builds for ROCm.
Want to support my work? Visit my ko-fi page here: https://ko-fi.com/bartowski

configuration.json Normal file

@@ -0,0 +1 @@
{"framework": "pytorch", "task": "text-generation", "allow_remote": true}

gemma-2-9b-it-IQ2_M.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:94b6fe40ee612eb50159ad3e6fc97cc04c88ec7af8a813246d8d8c27c4522a00
size 3434988384

gemma-2-9b-it-IQ2_S.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ee80dfba6b4b7aee3528d742526d984496e621640d3c75eb521a36afed2c6010
size 3211805536

gemma-2-9b-it-IQ2_XS.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4f3ae196ff0f2a34927db3ed93451575394fb47e0a843f5d7915f2a9da02e7fd
size 3067700064

gemma-2-9b-it-IQ3_M.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:744e3ead642fcbde768639cf2532f78da5b8ca80ce1718fed712c0ab38a5ebda
size 4494995808

gemma-2-9b-it-IQ3_XS.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:294638961d516cbc520c0dcd6a143fe0226f9969ee483e3e0d89b0f6f3587e4d
size 4145369440

gemma-2-9b-it-IQ3_XXS.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:79dfb350e10a4c7598b957c00381b7a8459f64e99457d4d05203dc2528683824
size 3797058400

gemma-2-9b-it-IQ4_XS.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a8817cc2720f51f1130d994fcacd58b8860c88882d10bbffb29ed4b5f62f155b
size 5183410528

gemma-2-9b-it-Q2_K.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6c3d884107e4d01bac8316cbe755137beac8988cc9c8a9c8806b61f0c4d8bcc5
size 3805778272

gemma-2-9b-it-Q3_K_L.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d97e40aef60b072b15d9bdd41b171f141f46b45a18eaee257d9bf91daea52e1f
size 5132833120

gemma-2-9b-it-Q3_K_M.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:066d5cd0c5ad7a8d2b7407fb53f56858e6736c8248a06111b5f65afcbc709c8f
size 4762161504

gemma-2-9b-it-Q3_K_S.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6a0dc0e74df5cd76fb134249f4858bd4610049111482daa6dc1d8479ff283f21
size 4338045280

gemma-2-9b-it-Q4_K_L.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:05dfbfc607fce0d039cac53098c387eb66c9c17587eb8bbf8b400c4769ae0252
size 6844347232

gemma-2-9b-it-Q4_K_M.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0874bf61be2e4b3d0a4a75e58fbd442dc410745d513c1e1e5de0b54ae33e65db
size 5761438048

gemma-2-9b-it-Q4_K_S.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2ae8828f11ad498fa6fb0c31ccbd5f6ec0bc3b579613f164b8bebc663f9d1763
size 5479305568

gemma-2-9b-it-Q5_K_L.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:381bc6455ee6bd33dfa3987d001678e390019e214a6f3e7fb17e22853a0102da
size 7730656096

gemma-2-9b-it-Q5_K_M.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fd2ef3823778e3c138aebb5892fcc4587313549ec3c21b84103090ce4b7617b4
size 6647746912

gemma-2-9b-it-Q5_K_S.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4ba2748d33407c999dc1c2fe6845e39eb686792cb5e42d5d1bc20599cfb2bdb7
size 6483972448

gemma-2-9b-it-Q6_K.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3f38ef3fe66173a34e05b7f356fa0880b3941e64c94618623f4c243e1a4fde12
size 7589450080

gemma-2-9b-it-Q6_K_L.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d69cac05ff85f04207610e19def2e741d0fda43c19e0bf64cc7dfef9f005a2ef
size 8672359264

gemma-2-9b-it-Q8_0.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:68ef14eb96fb6ab0dfb04dc9d73e4b5ff42fda0011110a160881d20b3d84b594
size 9827640160

gemma-2-9b-it-Q8_0_L.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0c2c8fe89751b1be794ce20ae92a764b3fe7fdfad735e5402650b5e40ad5788c
size 10688230240

gemma-2-9b-it-f32.gguf Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4f7d9c68f33c338a0d98faf6bad67cb13d39a5d4f3c87965bff4d62620d27d70
size 36974719488

gemma-2-9b-it.imatrix Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3ebc87750f8830146ae668032ca7e319bfbf89e4276c1a514aeee1be9e6addfd
size 6116901