From 85f611e8971cd2b0cdd827ab54f316371758d90c Mon Sep 17 00:00:00 2001
From: ModelHub XC
Date: Fri, 1 May 2026 05:38:17 +0800
Subject: [PATCH] Initialize the project; model provided by the ModelHub XC community
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Model: mradermacher/Llama-2-7b-chat-hf-GGUF
Source: Original Platform
---
 .gitattributes                 |  47 +++++++++++++++++++++
 Llama-2-7b-chat-hf.IQ4_XS.gguf |   3 ++
 Llama-2-7b-chat-hf.Q2_K.gguf   |   3 ++
 Llama-2-7b-chat-hf.Q3_K_L.gguf |   3 ++
 Llama-2-7b-chat-hf.Q3_K_M.gguf |   3 ++
 Llama-2-7b-chat-hf.Q3_K_S.gguf |   3 ++
 Llama-2-7b-chat-hf.Q4_K_M.gguf |   3 ++
 Llama-2-7b-chat-hf.Q4_K_S.gguf |   3 ++
 Llama-2-7b-chat-hf.Q5_K_M.gguf |   3 ++
 Llama-2-7b-chat-hf.Q5_K_S.gguf |   3 ++
 Llama-2-7b-chat-hf.Q6_K.gguf   |   3 ++
 Llama-2-7b-chat-hf.Q8_0.gguf   |   3 ++
 Llama-2-7b-chat-hf.f16.gguf    |   3 ++
 README.md                      | 131 ++++++++++++++++++++++++++++++++++
 14 files changed, 214 insertions(+)
 create mode 100644 .gitattributes
 create mode 100644 Llama-2-7b-chat-hf.IQ4_XS.gguf
 create mode 100644 Llama-2-7b-chat-hf.Q2_K.gguf
 create mode 100644 Llama-2-7b-chat-hf.Q3_K_L.gguf
 create mode 100644 Llama-2-7b-chat-hf.Q3_K_M.gguf
 create mode 100644 Llama-2-7b-chat-hf.Q3_K_S.gguf
 create mode 100644 Llama-2-7b-chat-hf.Q4_K_M.gguf
 create mode 100644 Llama-2-7b-chat-hf.Q4_K_S.gguf
 create mode 100644 Llama-2-7b-chat-hf.Q5_K_M.gguf
 create mode 100644 Llama-2-7b-chat-hf.Q5_K_S.gguf
 create mode 100644 Llama-2-7b-chat-hf.Q6_K.gguf
 create mode 100644 Llama-2-7b-chat-hf.Q8_0.gguf
 create mode 100644 Llama-2-7b-chat-hf.f16.gguf
 create mode 100644 README.md

diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..4bb239d
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,47 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+Llama-2-7b-chat-hf.Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+Llama-2-7b-chat-hf.Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
+Llama-2-7b-chat-hf.Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+Llama-2-7b-chat-hf.Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+Llama-2-7b-chat-hf.Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
+Llama-2-7b-chat-hf.Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
+Llama-2-7b-chat-hf.Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+Llama-2-7b-chat-hf.f16.gguf filter=lfs diff=lfs merge=lfs -text
+Llama-2-7b-chat-hf.Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+Llama-2-7b-chat-hf.Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+Llama-2-7b-chat-hf.Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+Llama-2-7b-chat-hf.IQ4_XS.gguf filter=lfs diff=lfs merge=lfs -text
diff --git a/Llama-2-7b-chat-hf.IQ4_XS.gguf b/Llama-2-7b-chat-hf.IQ4_XS.gguf
new file mode 100644
index 0000000..c5b6a6e
--- /dev/null
+++ b/Llama-2-7b-chat-hf.IQ4_XS.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:876e6d583b2d56f3121cc441e05a2cf5aac9ef5fae8219b48e9d1d6a4db2e2e4
+size 3647517376
diff --git a/Llama-2-7b-chat-hf.Q2_K.gguf b/Llama-2-7b-chat-hf.Q2_K.gguf
new file mode 100644
index 0000000..3809288
--- /dev/null
+++ b/Llama-2-7b-chat-hf.Q2_K.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a7b270633f412c54a4164899386a0550a0748a30fffe22c1375a9e3307b14387
+size 2532864704
diff --git a/Llama-2-7b-chat-hf.Q3_K_L.gguf b/Llama-2-7b-chat-hf.Q3_K_L.gguf
new file mode 100644
index 0000000..2179057
--- /dev/null
+++ b/Llama-2-7b-chat-hf.Q3_K_L.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eadcbd37215fedaecd8f86c9117245416467fde1592becade13ab3fa2ac3687d
+size 3597112000
diff --git a/Llama-2-7b-chat-hf.Q3_K_M.gguf b/Llama-2-7b-chat-hf.Q3_K_M.gguf
new file mode 100644
index 0000000..d369722
--- /dev/null
+++ b/Llama-2-7b-chat-hf.Q3_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ebc3a71512f7120743f783a5c4a0fe5fb87de345997f5d3b7d6fceca4be1f25a
+size 3298005696
diff --git a/Llama-2-7b-chat-hf.Q3_K_S.gguf b/Llama-2-7b-chat-hf.Q3_K_S.gguf
new file mode 100644
index 0000000..b6743ad
--- /dev/null
+++ b/Llama-2-7b-chat-hf.Q3_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9f51efdae2440a0dcc956b2237cba0ed09f727d1267f4d8165671960abc8d43b
+size 2948305600
diff --git a/Llama-2-7b-chat-hf.Q4_K_M.gguf b/Llama-2-7b-chat-hf.Q4_K_M.gguf
new file mode 100644
index 0000000..6cbeb82
--- /dev/null
+++ b/Llama-2-7b-chat-hf.Q4_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fd96dece4b436918ba5c904f01384b019b9382bb6fb568f7e78834d9b9fef3fc
+size 4081005248
diff --git a/Llama-2-7b-chat-hf.Q4_K_S.gguf b/Llama-2-7b-chat-hf.Q4_K_S.gguf
new file mode 100644
index 0000000..f58ee98
--- /dev/null
+++ b/Llama-2-7b-chat-hf.Q4_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3fb324e88307c9e9799ad991230e527df0ea64aadb7ef9b0742cb7da8be05042
+size 3856741056
diff --git a/Llama-2-7b-chat-hf.Q5_K_M.gguf b/Llama-2-7b-chat-hf.Q5_K_M.gguf
new file mode 100644
index 0000000..f500a04
--- /dev/null
+++ b/Llama-2-7b-chat-hf.Q5_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7cd3a8e8d19cd57c61a037350925ea2b41c67813ea1bb13d555d063d7f1d978c
+size 4783157952
diff --git a/Llama-2-7b-chat-hf.Q5_K_S.gguf b/Llama-2-7b-chat-hf.Q5_K_S.gguf
new file mode 100644
index 0000000..5b2320a
--- /dev/null
+++ b/Llama-2-7b-chat-hf.Q5_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1b3ba9c67848ca712ab2211c208e158d426951c00a1ff0df3ebe85952427c03a
+size 4651692736
diff --git a/Llama-2-7b-chat-hf.Q6_K.gguf b/Llama-2-7b-chat-hf.Q6_K.gguf
new file mode 100644
index 0000000..cb42ec1
--- /dev/null
+++ b/Llama-2-7b-chat-hf.Q6_K.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3703fc1fcc57d1b50d5af97182d9c4188ad8368c3dfb1233d4a629ea4f69abac
+size 5529195200
diff --git a/Llama-2-7b-chat-hf.Q8_0.gguf b/Llama-2-7b-chat-hf.Q8_0.gguf
new file mode 100644
index 0000000..d70d14c
--- /dev/null
+++ b/Llama-2-7b-chat-hf.Q8_0.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f7ad03270ea8ad25b732499a97006dab93d4826e43657b511b3b52a1b0613f9f
+size 7161090752
diff --git a/Llama-2-7b-chat-hf.f16.gguf b/Llama-2-7b-chat-hf.f16.gguf
new file mode 100644
index 0000000..4e445a1
--- /dev/null
+++ b/Llama-2-7b-chat-hf.f16.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:894698aa89c62e944619db142e654baa66b0341f627fba0a409568af0e451005
+size 13478105792
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..315b68a
--- /dev/null
+++ b/README.md
@@ -0,0 +1,131 @@
+---
+base_model: NousResearch/Llama-2-7b-chat-hf
+extra_gated_button_content: Submit
+extra_gated_fields:
+  ? I agree to share my name, email address and username with Meta and confirm that
+    I have already been granted download access on the Meta website
+  : checkbox
+extra_gated_heading: Access Llama 2 on Hugging Face
+language:
+- en
+library_name: transformers
+quantized_by: mradermacher
+tags:
+- facebook
+- meta
+- pytorch
+- llama
+- llama-2
+---
+## About
+
+static quants of https://huggingface.co/NousResearch/Llama-2-7b-chat-hf
+
+weighted/imatrix quants are available at https://huggingface.co/mradermacher/Llama-2-7b-chat-hf-i1-GGUF
+
+## Usage
+
+If you are unsure how to use GGUF files, refer to one of [TheBloke's
+READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for
+more details, including on how to concatenate multi-part files.
+
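+For a quick local test, here is a minimal sketch using the
+`llama-cpp-python` bindings (one GGUF runtime among many; the file choice
+and parameters below are illustrative):
+
+```python
+# Minimal sketch: chat with one of these quants via llama-cpp-python.
+# Assumes `pip install llama-cpp-python` and a local copy of the Q4_K_M file.
+from llama_cpp import Llama
+
+llm = Llama(
+    model_path="Llama-2-7b-chat-hf.Q4_K_M.gguf",  # any quant from the table below
+    n_ctx=4096,        # Llama 2 context window
+    n_gpu_layers=-1,   # offload all layers if built with GPU support; 0 = CPU only
+)
+
+out = llm.create_chat_completion(
+    messages=[{"role": "user", "content": "Explain GGUF in one sentence."}],
+    max_tokens=128,
+)
+print(out["choices"][0]["message"]["content"])
+```
+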
+## Provided Quants
+
+(sorted by size, not necessarily quality. IQ-quants are often preferable
+over similar sized non-IQ quants)
+
+| Link | Type | Size/GB | Notes |
+|:-----|:-----|--------:|:------|
+| [GGUF](https://huggingface.co/mradermacher/Llama-2-7b-chat-hf-GGUF/resolve/main/Llama-2-7b-chat-hf.Q2_K.gguf) | Q2_K | 2.6 | |
+| [GGUF](https://huggingface.co/mradermacher/Llama-2-7b-chat-hf-GGUF/resolve/main/Llama-2-7b-chat-hf.Q3_K_S.gguf) | Q3_K_S | 3.0 | |
+| [GGUF](https://huggingface.co/mradermacher/Llama-2-7b-chat-hf-GGUF/resolve/main/Llama-2-7b-chat-hf.Q3_K_M.gguf) | Q3_K_M | 3.4 | lower quality |
+| [GGUF](https://huggingface.co/mradermacher/Llama-2-7b-chat-hf-GGUF/resolve/main/Llama-2-7b-chat-hf.Q3_K_L.gguf) | Q3_K_L | 3.7 | |
+| [GGUF](https://huggingface.co/mradermacher/Llama-2-7b-chat-hf-GGUF/resolve/main/Llama-2-7b-chat-hf.IQ4_XS.gguf) | IQ4_XS | 3.7 | |
+| [GGUF](https://huggingface.co/mradermacher/Llama-2-7b-chat-hf-GGUF/resolve/main/Llama-2-7b-chat-hf.Q4_K_S.gguf) | Q4_K_S | 4.0 | fast, recommended |
+| [GGUF](https://huggingface.co/mradermacher/Llama-2-7b-chat-hf-GGUF/resolve/main/Llama-2-7b-chat-hf.Q4_K_M.gguf) | Q4_K_M | 4.2 | fast, recommended |
+| [GGUF](https://huggingface.co/mradermacher/Llama-2-7b-chat-hf-GGUF/resolve/main/Llama-2-7b-chat-hf.Q5_K_S.gguf) | Q5_K_S | 4.8 | |
+| [GGUF](https://huggingface.co/mradermacher/Llama-2-7b-chat-hf-GGUF/resolve/main/Llama-2-7b-chat-hf.Q5_K_M.gguf) | Q5_K_M | 4.9 | |
+| [GGUF](https://huggingface.co/mradermacher/Llama-2-7b-chat-hf-GGUF/resolve/main/Llama-2-7b-chat-hf.Q6_K.gguf) | Q6_K | 5.6 | very good quality |
+| [GGUF](https://huggingface.co/mradermacher/Llama-2-7b-chat-hf-GGUF/resolve/main/Llama-2-7b-chat-hf.Q8_0.gguf) | Q8_0 | 7.3 | fast, best quality |
+| [GGUF](https://huggingface.co/mradermacher/Llama-2-7b-chat-hf-GGUF/resolve/main/Llama-2-7b-chat-hf.f16.gguf) | f16 | 13.6 | 16 bpw, overkill |
+
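+To fetch a single quant without cloning the whole repository, here is a
+minimal sketch with the `huggingface_hub` client (file choice illustrative):
+
+```python
+# Minimal sketch: download one quant from this repo via huggingface_hub.
+# Assumes `pip install huggingface_hub`; filename must match the table above.
+from huggingface_hub import hf_hub_download
+
+path = hf_hub_download(
+    repo_id="mradermacher/Llama-2-7b-chat-hf-GGUF",
+    filename="Llama-2-7b-chat-hf.Q4_K_M.gguf",
+)
+print(path)  # local cache path of the downloaded GGUF
+```
+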
+Here is a handy graph by ikawrakow comparing some lower-quality quant
+types (lower is better):
+
+![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png)
+
+And here are Artefact2's thoughts on the matter:
+https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9
+
+## FAQ / Model Request
+
+See https://huggingface.co/mradermacher/model_requests for some answers to
+questions you might have and/or if you want some other model quantized.
+
+## Thanks
+
+I thank my company, [nethype GmbH](https://www.nethype.de/), for letting
+me use its servers and providing upgrades to my workstation to enable
+this work in my free time.
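+
+## Verifying Downloads
+
+Every quant here is stored via Git LFS, whose pointers record a sha256 and
+byte size per file. A minimal sketch for checking a download against them
+(the constants below are the values for the Q4_K_M file; swap in your own):
+
+```python
+# Minimal sketch: verify a downloaded quant against the sha256 and byte
+# size recorded in this repo's Git LFS pointer (values here: Q4_K_M).
+import hashlib
+import os
+
+PATH = "Llama-2-7b-chat-hf.Q4_K_M.gguf"
+EXPECTED_SHA256 = "fd96dece4b436918ba5c904f01384b019b9382bb6fb568f7e78834d9b9fef3fc"
+EXPECTED_SIZE = 4081005248
+
+assert os.path.getsize(PATH) == EXPECTED_SIZE, "size mismatch"
+h = hashlib.sha256()
+with open(PATH, "rb") as f:
+    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
+        h.update(chunk)
+assert h.hexdigest() == EXPECTED_SHA256, "hash mismatch"
+print("checksum OK")
+```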