Update metadata with huggingface_hub

This commit is contained in:
ai-modelscope
2024-11-27 10:26:03 +08:00
parent 273727606e
commit c88c79887c
28 changed files with 326 additions and 63 deletions

57
.gitattributes vendored
View File

@@ -1,47 +1,60 @@
*.7z filter=lfs diff=lfs merge=lfs -text *.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text *.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text *.bin filter=lfs diff=lfs merge=lfs -text
*.bin.* filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text *.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text *.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text *.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text *.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text *.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text *.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text *.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text *.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text *.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text *.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text *.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text *.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text *.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text *.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text *.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text *.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text *.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text *.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text *.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text *.zip filter=lfs diff=lfs merge=lfs -text
*.zstandard filter=lfs diff=lfs merge=lfs -text
*.tfevents* filter=lfs diff=lfs merge=lfs -text
*.db* filter=lfs diff=lfs merge=lfs -text
*.ark* filter=lfs diff=lfs merge=lfs -text
**/*ckpt*data* filter=lfs diff=lfs merge=lfs -text
**/*ckpt*.meta filter=lfs diff=lfs merge=lfs -text
**/*ckpt*.index filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.gguf* filter=lfs diff=lfs merge=lfs -text
*.ggml filter=lfs diff=lfs merge=lfs -text
*.llamafile* filter=lfs diff=lfs merge=lfs -text
*.pt2 filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text *.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text *tfevents* filter=lfs diff=lfs merge=lfs -text
Humanish-LLama3-8B-Instruct-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
Humanish-LLama3-8B-Instruct-Q6_K_L.gguf filter=lfs diff=lfs merge=lfs -text
Humanish-LLama3-8B-Instruct-Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
Humanish-LLama3-8B-Instruct-Q5_K_L.gguf filter=lfs diff=lfs merge=lfs -text
Humanish-LLama3-8B-Instruct-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
Humanish-LLama3-8B-Instruct-Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
Humanish-LLama3-8B-Instruct-Q4_K_L.gguf filter=lfs diff=lfs merge=lfs -text
Humanish-LLama3-8B-Instruct-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
Humanish-LLama3-8B-Instruct-Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
Humanish-LLama3-8B-Instruct-Q4_0_8_8.gguf filter=lfs diff=lfs merge=lfs -text
Humanish-LLama3-8B-Instruct-Q4_0_4_8.gguf filter=lfs diff=lfs merge=lfs -text
Humanish-LLama3-8B-Instruct-Q4_0_4_4.gguf filter=lfs diff=lfs merge=lfs -text
Humanish-LLama3-8B-Instruct-Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
Humanish-LLama3-8B-Instruct-IQ4_XS.gguf filter=lfs diff=lfs merge=lfs -text
Humanish-LLama3-8B-Instruct-Q3_K_XL.gguf filter=lfs diff=lfs merge=lfs -text
Humanish-LLama3-8B-Instruct-Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
Humanish-LLama3-8B-Instruct-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
Humanish-LLama3-8B-Instruct-IQ3_M.gguf filter=lfs diff=lfs merge=lfs -text
Humanish-LLama3-8B-Instruct-Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
Humanish-LLama3-8B-Instruct-IQ3_XS.gguf filter=lfs diff=lfs merge=lfs -text
Humanish-LLama3-8B-Instruct-Q2_K_L.gguf filter=lfs diff=lfs merge=lfs -text
Humanish-LLama3-8B-Instruct-Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
Humanish-LLama3-8B-Instruct-IQ2_M.gguf filter=lfs diff=lfs merge=lfs -text
Humanish-LLama3-8B-Instruct-f16.gguf filter=lfs diff=lfs merge=lfs -text
Humanish-LLama3-8B-Instruct.imatrix filter=lfs diff=lfs merge=lfs -text

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f859cad98ed0f56dbcd9fcdf49d9718a7935952c1ab8915d9446d0859985140a
size 2948281856

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:139cc1d51c8dc5403031b274c01565f6f935622d2996d94009d9dae5a515c6a0
size 3784824320

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1dd087d3163a75ebf4c22331e8e3cafa9ae8b1bbcb26b9f9d69b570cceb99eb7
size 3518748160

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bcb3e5f0a969af27786bed05d990a09c67d618ac19b4b4aa4128d9ff9981ee16
size 4447663616

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5304202add92d1a92d9c727696219b25e6900b687b5a728d2df35b2060442d24
size 3179132416

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3eddfdaf44298ba04835f5b6553fc3074c0eef43494f8f032c5cf7dcca31d13c
size 3692156416

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:57c174fb7c4b72fbe848991e85d6d80bc1b0f73e7de6de98a30bc6d9e5f7c62d
size 4321957376

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:97a02e9d631a2224505b386dafeb9d007541574458fd1abd366f51818a11892f
size 4018918912

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9e923c5a5067aec7e459a9f9c274a63dcc01dd46208207f9041082c7e52d8911
size 3664500224

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:58141f71ce9129cf101eb0eddee115e7b3686360927094e5b668f65bc9b33ab6
size 4781626880

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ec9792b39156314def1a211291ee67f77e5bd21a714356a24d71e1c98d9d01f9
size 4675892736

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:71dcb07842a97e9edfb7c6b4c369ce4280b0cf4d43e27a80cf48ec64007d9a05
size 4661212672

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b1dce5086c2b28375712b2710c9ff57777ec1eb80349e0e27c8a93ae1da414aa
size 4661212672

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:de54536d178d5f133e6f50766e7733cb7f9400d1e008ffb532ebabbd5d599dd2
size 4661212672

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ec3b0953664dc52f5153f97e5b49bf55a6d4f34c5c199b66cdd759dd37d645b7
size 5310633472

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6061136cfdfde91b9943ee202774765733760b0edb611fdeaff0b7aae9c41add
size 4920735232

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8cce4419b54b4f7bec03e4bc3b69b38783284029358599cf13188d605efabb5c
size 4692669952

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a297ee459d57af5f1f0beff1ff76d867db1413fa4fd9ef46885daebd579c2924
size 6057219584

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c81144446a563379336a2ead1beff57148fe764fc93246865d40db4498414c68
size 5732988416

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ca749fe5970be299aea7aa0b19c71c958d3f5db996ec4dea6eeb8139a770198f
size 5599294976

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b94ff7b55f3c27802bc209e5fab75a41b86d47de2f70e12fb336f2e84004c0e7
size 6596007424

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a993a7833f10d73079cd6bd1051e0c4271236e19370a29a075efe7d2894fa04d
size 6850467328

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d8b467aee68d88c48f670074a148b7951fdef8a47dce2ebb0961f1a009c3eaaa
size 8540771840

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a79cea2fa63c9ead1c8dc30b3380b7bafb51a2250c2b211075606c7901912a9b
size 16068891872

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:500ae550bfb2eab35ff991924d5762d56cfe7ad5567d309f55f0540fba7ba8a2
size 4988170

256
README.md
View File

@@ -1,47 +1,221 @@
--- ---
license: Apache License 2.0 base_model: HumanLLMs/Humanish-LLama3-8B-Instruct
datasets:
#model-type: - HumanLLMs/humanish-dpo-project
##如 gpt、phi、llama、chatglm、baichuan 等 license: llama3
#- gpt pipeline_tag: text-generation
tags:
#domain: - axolotl
##如 nlp、cv、audio、multi-modal - dpo
#- nlp - trl
- generated_from_trainer
#language: quantized_by: bartowski
##语言代码列表 https://help.aliyun.com/document_detail/215387.html?spm=a2c4g.11186623.0.0.9f8d7467kni6Aa model-index:
#- cn - name: Humanish-LLama3.1-8B-Instruct
results:
#metrics: - task:
##如 CIDEr、Blue、ROUGE 等 type: text-generation
#- CIDEr name: Text Generation
dataset:
#tags: name: IFEval (0-Shot)
##各种自定义,包括 pretrained、fine-tuned、instruction-tuned、RL-tuned 等训练方法和其他 type: HuggingFaceH4/ifeval
#- pretrained args:
num_few_shot: 0
#tools: metrics:
##如 vllm、fastchat、llamacpp、AdaSeq 等 - type: inst_level_strict_acc and prompt_level_strict_acc
#- vllm value: 64.98
name: strict accuracy
source:
url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=HumanLLMs/Humanish-LLama3.1-8B-Instruct
name: Open LLM Leaderboard
- task:
type: text-generation
name: Text Generation
dataset:
name: BBH (3-Shot)
type: BBH
args:
num_few_shot: 3
metrics:
- type: acc_norm
value: 28.01
name: normalized accuracy
source:
url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=HumanLLMs/Humanish-LLama3.1-8B-Instruct
name: Open LLM Leaderboard
- task:
type: text-generation
name: Text Generation
dataset:
name: MATH Lvl 5 (4-Shot)
type: hendrycks/competition_math
args:
num_few_shot: 4
metrics:
- type: exact_match
value: 8.46
name: exact match
source:
url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=HumanLLMs/Humanish-LLama3.1-8B-Instruct
name: Open LLM Leaderboard
- task:
type: text-generation
name: Text Generation
dataset:
name: GPQA (0-shot)
type: Idavidrein/gpqa
args:
num_few_shot: 0
metrics:
- type: acc_norm
value: 0.78
name: acc_norm
source:
url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=HumanLLMs/Humanish-LLama3.1-8B-Instruct
name: Open LLM Leaderboard
- task:
type: text-generation
name: Text Generation
dataset:
name: MuSR (0-shot)
type: TAUR-Lab/MuSR
args:
num_few_shot: 0
metrics:
- type: acc_norm
value: 2.0
name: acc_norm
source:
url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=HumanLLMs/Humanish-LLama3.1-8B-Instruct
name: Open LLM Leaderboard
- task:
type: text-generation
name: Text Generation
dataset:
name: MMLU-PRO (5-shot)
type: TIGER-Lab/MMLU-Pro
config: main
split: test
args:
num_few_shot: 5
metrics:
- type: acc
value: 30.02
name: accuracy
source:
url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=HumanLLMs/Humanish-LLama3.1-8B-Instruct
name: Open LLM Leaderboard
--- ---
### 当前模型的贡献者未提供更加详细的模型介绍。模型文件和权重,可浏览“模型文件”页面获取。
#### 您可以通过如下git clone命令或者ModelScope SDK来下载模型
SDK下载 ## Llamacpp imatrix Quantizations of Humanish-LLama3-8B-Instruct
```bash
#安装ModelScope Using <a href="https://github.com/ggerganov/llama.cpp/">llama.cpp</a> release <a href="https://github.com/ggerganov/llama.cpp/releases/tag/b3878">b3878</a> for quantization.
pip install modelscope
Original model: https://huggingface.co/HumanLLMs/Humanish-LLama3-8B-Instruct
All quants made using imatrix option with dataset from [here](https://gist.github.com/bartowski1182/eb213dccb3571f863da82e99418f81e8)
Run them in [LM Studio](https://lmstudio.ai/)
## Prompt format
No prompt format found, check original model page
## Download a file (not the whole branch) from below:
| Filename | Quant type | File Size | Split | Description |
| -------- | ---------- | --------- | ----- | ----------- |
| [Humanish-LLama3-8B-Instruct-f16.gguf](https://huggingface.co/bartowski/Humanish-LLama3-8B-Instruct-GGUF/blob/main/Humanish-LLama3-8B-Instruct-f16.gguf) | f16 | 16.07GB | false | Full F16 weights. |
| [Humanish-LLama3-8B-Instruct-Q8_0.gguf](https://huggingface.co/bartowski/Humanish-LLama3-8B-Instruct-GGUF/blob/main/Humanish-LLama3-8B-Instruct-Q8_0.gguf) | Q8_0 | 8.54GB | false | Extremely high quality, generally unneeded but max available quant. |
| [Humanish-LLama3-8B-Instruct-Q6_K_L.gguf](https://huggingface.co/bartowski/Humanish-LLama3-8B-Instruct-GGUF/blob/main/Humanish-LLama3-8B-Instruct-Q6_K_L.gguf) | Q6_K_L | 6.85GB | false | Uses Q8_0 for embed and output weights. Very high quality, near perfect, *recommended*. |
| [Humanish-LLama3-8B-Instruct-Q6_K.gguf](https://huggingface.co/bartowski/Humanish-LLama3-8B-Instruct-GGUF/blob/main/Humanish-LLama3-8B-Instruct-Q6_K.gguf) | Q6_K | 6.60GB | false | Very high quality, near perfect, *recommended*. |
| [Humanish-LLama3-8B-Instruct-Q5_K_L.gguf](https://huggingface.co/bartowski/Humanish-LLama3-8B-Instruct-GGUF/blob/main/Humanish-LLama3-8B-Instruct-Q5_K_L.gguf) | Q5_K_L | 6.06GB | false | Uses Q8_0 for embed and output weights. High quality, *recommended*. |
| [Humanish-LLama3-8B-Instruct-Q5_K_M.gguf](https://huggingface.co/bartowski/Humanish-LLama3-8B-Instruct-GGUF/blob/main/Humanish-LLama3-8B-Instruct-Q5_K_M.gguf) | Q5_K_M | 5.73GB | false | High quality, *recommended*. |
| [Humanish-LLama3-8B-Instruct-Q5_K_S.gguf](https://huggingface.co/bartowski/Humanish-LLama3-8B-Instruct-GGUF/blob/main/Humanish-LLama3-8B-Instruct-Q5_K_S.gguf) | Q5_K_S | 5.60GB | false | High quality, *recommended*. |
| [Humanish-LLama3-8B-Instruct-Q4_K_L.gguf](https://huggingface.co/bartowski/Humanish-LLama3-8B-Instruct-GGUF/blob/main/Humanish-LLama3-8B-Instruct-Q4_K_L.gguf) | Q4_K_L | 5.31GB | false | Uses Q8_0 for embed and output weights. Good quality, *recommended*. |
| [Humanish-LLama3-8B-Instruct-Q4_K_M.gguf](https://huggingface.co/bartowski/Humanish-LLama3-8B-Instruct-GGUF/blob/main/Humanish-LLama3-8B-Instruct-Q4_K_M.gguf) | Q4_K_M | 4.92GB | false | Good quality, default size for most use cases, *recommended*. |
| [Humanish-LLama3-8B-Instruct-Q3_K_XL.gguf](https://huggingface.co/bartowski/Humanish-LLama3-8B-Instruct-GGUF/blob/main/Humanish-LLama3-8B-Instruct-Q3_K_XL.gguf) | Q3_K_XL | 4.78GB | false | Uses Q8_0 for embed and output weights. Lower quality but usable, good for low RAM availability. |
| [Humanish-LLama3-8B-Instruct-Q4_K_S.gguf](https://huggingface.co/bartowski/Humanish-LLama3-8B-Instruct-GGUF/blob/main/Humanish-LLama3-8B-Instruct-Q4_K_S.gguf) | Q4_K_S | 4.69GB | false | Slightly lower quality with more space savings, *recommended*. |
| [Humanish-LLama3-8B-Instruct-Q4_0.gguf](https://huggingface.co/bartowski/Humanish-LLama3-8B-Instruct-GGUF/blob/main/Humanish-LLama3-8B-Instruct-Q4_0.gguf) | Q4_0 | 4.68GB | false | Legacy format, generally not worth using over similarly sized formats |
| [Humanish-LLama3-8B-Instruct-Q4_0_8_8.gguf](https://huggingface.co/bartowski/Humanish-LLama3-8B-Instruct-GGUF/blob/main/Humanish-LLama3-8B-Instruct-Q4_0_8_8.gguf) | Q4_0_8_8 | 4.66GB | false | Optimized for ARM inference. Requires 'sve' support (see link below). |
| [Humanish-LLama3-8B-Instruct-Q4_0_4_8.gguf](https://huggingface.co/bartowski/Humanish-LLama3-8B-Instruct-GGUF/blob/main/Humanish-LLama3-8B-Instruct-Q4_0_4_8.gguf) | Q4_0_4_8 | 4.66GB | false | Optimized for ARM inference. Requires 'i8mm' support (see link below). |
| [Humanish-LLama3-8B-Instruct-Q4_0_4_4.gguf](https://huggingface.co/bartowski/Humanish-LLama3-8B-Instruct-GGUF/blob/main/Humanish-LLama3-8B-Instruct-Q4_0_4_4.gguf) | Q4_0_4_4 | 4.66GB | false | Optimized for ARM inference. Should work well on all ARM chips, pick this if you're unsure. |
| [Humanish-LLama3-8B-Instruct-IQ4_XS.gguf](https://huggingface.co/bartowski/Humanish-LLama3-8B-Instruct-GGUF/blob/main/Humanish-LLama3-8B-Instruct-IQ4_XS.gguf) | IQ4_XS | 4.45GB | false | Decent quality, smaller than Q4_K_S with similar performance, *recommended*. |
| [Humanish-LLama3-8B-Instruct-Q3_K_L.gguf](https://huggingface.co/bartowski/Humanish-LLama3-8B-Instruct-GGUF/blob/main/Humanish-LLama3-8B-Instruct-Q3_K_L.gguf) | Q3_K_L | 4.32GB | false | Lower quality but usable, good for low RAM availability. |
| [Humanish-LLama3-8B-Instruct-Q3_K_M.gguf](https://huggingface.co/bartowski/Humanish-LLama3-8B-Instruct-GGUF/blob/main/Humanish-LLama3-8B-Instruct-Q3_K_M.gguf) | Q3_K_M | 4.02GB | false | Low quality. |
| [Humanish-LLama3-8B-Instruct-IQ3_M.gguf](https://huggingface.co/bartowski/Humanish-LLama3-8B-Instruct-GGUF/blob/main/Humanish-LLama3-8B-Instruct-IQ3_M.gguf) | IQ3_M | 3.78GB | false | Medium-low quality, new method with decent performance comparable to Q3_K_M. |
| [Humanish-LLama3-8B-Instruct-Q2_K_L.gguf](https://huggingface.co/bartowski/Humanish-LLama3-8B-Instruct-GGUF/blob/main/Humanish-LLama3-8B-Instruct-Q2_K_L.gguf) | Q2_K_L | 3.69GB | false | Uses Q8_0 for embed and output weights. Very low quality but surprisingly usable. |
| [Humanish-LLama3-8B-Instruct-Q3_K_S.gguf](https://huggingface.co/bartowski/Humanish-LLama3-8B-Instruct-GGUF/blob/main/Humanish-LLama3-8B-Instruct-Q3_K_S.gguf) | Q3_K_S | 3.66GB | false | Low quality, not recommended. |
| [Humanish-LLama3-8B-Instruct-IQ3_XS.gguf](https://huggingface.co/bartowski/Humanish-LLama3-8B-Instruct-GGUF/blob/main/Humanish-LLama3-8B-Instruct-IQ3_XS.gguf) | IQ3_XS | 3.52GB | false | Lower quality, new method with decent performance, slightly better than Q3_K_S. |
| [Humanish-LLama3-8B-Instruct-Q2_K.gguf](https://huggingface.co/bartowski/Humanish-LLama3-8B-Instruct-GGUF/blob/main/Humanish-LLama3-8B-Instruct-Q2_K.gguf) | Q2_K | 3.18GB | false | Very low quality but surprisingly usable. |
| [Humanish-LLama3-8B-Instruct-IQ2_M.gguf](https://huggingface.co/bartowski/Humanish-LLama3-8B-Instruct-GGUF/blob/main/Humanish-LLama3-8B-Instruct-IQ2_M.gguf) | IQ2_M | 2.95GB | false | Relatively low quality, uses SOTA techniques to be surprisingly usable. |
## Embed/output weights
Some of these quants (Q3_K_XL, Q4_K_L etc) are the standard quantization method with the embeddings and output weights quantized to Q8_0 instead of what they would normally default to.
Some say that this improves the quality, others don't notice any difference. If you use these models PLEASE COMMENT with your findings. I would like feedback that these are actually used and useful so I don't keep uploading quants no one is using.
Thanks!
## Downloading using huggingface-cli
First, make sure you have huggingface-cli installed:
``` ```
```python pip install -U "huggingface_hub[cli]"
#SDK模型下载
from modelscope import snapshot_download
model_dir = snapshot_download('bartowski/Humanish-LLama3-8B-Instruct-GGUF')
```
Git下载
```
#Git模型下载
git clone https://www.modelscope.cn/bartowski/Humanish-LLama3-8B-Instruct-GGUF.git
``` ```
<p style="color: lightgrey;">如果您是本模型的贡献者,我们邀请您根据<a href="https://modelscope.cn/docs/ModelScope%E6%A8%A1%E5%9E%8B%E6%8E%A5%E5%85%A5%E6%B5%81%E7%A8%8B%E6%A6%82%E8%A7%88" style="color: lightgrey; text-decoration: underline;">模型贡献文档</a>,及时完善模型卡片内容。</p> Then, you can target the specific file you want:
```
huggingface-cli download bartowski/Humanish-LLama3-8B-Instruct-GGUF --include "Humanish-LLama3-8B-Instruct-Q4_K_M.gguf" --local-dir ./
```
If the model is bigger than 50GB, it will have been split into multiple files. In order to download them all to a local folder, run:
```
huggingface-cli download bartowski/Humanish-LLama3-8B-Instruct-GGUF --include "Humanish-LLama3-8B-Instruct-Q8_0/*" --local-dir ./
```
You can either specify a new local-dir (Humanish-LLama3-8B-Instruct-Q8_0) or download them all in place (./)
## Q4_0_X_X
These are *NOT* for Metal (Apple) offloading, only ARM chips.
If you're using an ARM chip, the Q4_0_X_X quants will have a substantial speedup. Check out Q4_0_4_4 speed comparisons [on the original pull request](https://github.com/ggerganov/llama.cpp/pull/5780#pullrequestreview-21657544660)
To check which one would work best for your ARM chip, you can check [AArch64 SoC features](https://gpages.juszkiewicz.com.pl/arm-socs-table/arm-socs.html) (thanks EloyOn!).
## Which file should I choose?
A great write up with charts showing various performances is provided by Artefact2 [here](https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9)
The first thing to figure out is how big a model you can run. To do this, you'll need to figure out how much RAM and/or VRAM you have.
If you want your model running as FAST as possible, you'll want to fit the whole thing on your GPU's VRAM. Aim for a quant with a file size 1-2GB smaller than your GPU's total VRAM.
If you want the absolute maximum quality, add both your system RAM and your GPU's VRAM together, then similarly grab a quant with a file size 1-2GB smaller than that total.
Next, you'll need to decide if you want to use an 'I-quant' or a 'K-quant'.
If you don't want to think too much, grab one of the K-quants. These are in format 'QX_K_X', like Q5_K_M.
If you want to get more into the weeds, you can check out this extremely useful feature chart:
[llama.cpp feature matrix](https://github.com/ggerganov/llama.cpp/wiki/Feature-matrix)
But basically, if you're aiming for below Q4, and you're running cuBLAS (Nvidia) or rocBLAS (AMD), you should look towards the I-quants. These are in format IQX_X, like IQ3_M. These are newer and offer better performance for their size.
These I-quants can also be used on CPU and Apple Metal, but will be slower than their K-quant equivalent, so speed vs performance is a tradeoff you'll have to decide.
The I-quants are *not* compatible with Vulkan, which is also AMD, so if you have an AMD card double check if you're using the rocBLAS build or the Vulkan build. At the time of writing this, LM Studio has a preview with ROCm support, and other inference engines have specific builds for ROCm.
## Credits
Thank you kalomaze and Dampf for assistance in creating the imatrix calibration dataset
Thank you ZeroWw for the inspiration to experiment with embed/output
Want to support my work? Visit my ko-fi page here: https://ko-fi.com/bartowski

1
configuration.json Normal file
View File

@@ -0,0 +1 @@
{"framework": "pytorch", "task": "text-generation", "allow_remote": true}