diff --git a/.gitattributes b/.gitattributes
index a6344aa..6191696 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -33,3 +33,15 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+granite-8b-code-instruct-128k-Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
+granite-8b-code-instruct-128k-Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+granite-8b-code-instruct-128k-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+granite-8b-code-instruct-128k-Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+granite-8b-code-instruct-128k-Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
+granite-8b-code-instruct-128k-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+granite-8b-code-instruct-128k-Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+granite-8b-code-instruct-128k-Q5_0.gguf filter=lfs diff=lfs merge=lfs -text
+granite-8b-code-instruct-128k-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+granite-8b-code-instruct-128k-Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+granite-8b-code-instruct-128k-Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
+granite-8b-code-instruct-128k-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..8848bc2
--- /dev/null
+++ b/README.md
@@ -0,0 +1,144 @@
+---
+pipeline_tag: text-generation
+inference: false
+license: apache-2.0
+datasets:
+- bigcode/commitpackft
+- TIGER-Lab/MathInstruct
+- meta-math/MetaMathQA
+- glaiveai/glaive-code-assistant-v3
+- glaive-function-calling-v2
+- bugdaryan/sql-create-context-instruction
+- garage-bAInd/Open-Platypus
+- nvidia/HelpSteer
+- bigcode/self-oss-instruct-sc2-exec-filter-50k
+metrics:
+- code_eval
+library_name: transformers
+tags:
+- code
+- granite
+- TensorBlock
+- GGUF
+base_model: ibm-granite/granite-8b-code-instruct-128k
+model-index:
+- name: granite-8B-Code-instruct-128k
+  results:
+  - task:
+      type: text-generation
+    dataset:
+      name: HumanEvalSynthesis (Python)
+      type: bigcode/humanevalpack
+    metrics:
+    - type: pass@1
+      value: 62.2
+      name: pass@1
+      verified: false
+    - type: pass@1
+      value: 51.4
+      name: pass@1
+      verified: false
+    - type: pass@1
+      value: 38.9
+      name: pass@1
+      verified: false
+    - type: pass@1
+      value: 38.3
+      name: pass@1
+      verified: false
+  - task:
+      type: text-generation
+    dataset:
+      name: RepoQA (Python@16K)
+      type: repoqa
+    metrics:
+    - type: pass@1 (thresh=0.5)
+      value: 73.0
+      name: pass@1 (thresh=0.5)
+      verified: false
+    - type: pass@1 (thresh=0.5)
+      value: 37.0
+      name: pass@1 (thresh=0.5)
+      verified: false
+    - type: pass@1 (thresh=0.5)
+      value: 73.0
+      name: pass@1 (thresh=0.5)
+      verified: false
+    - type: pass@1 (thresh=0.5)
+      value: 62.0
+      name: pass@1 (thresh=0.5)
+      verified: false
+    - type: pass@1 (thresh=0.5)
+      value: 63.0
+      name: pass@1 (thresh=0.5)
+      verified: false
+---
+TensorBlock
+
+Feedback and support: TensorBlock's Twitter/X, Telegram Group and Discord server
+
## ibm-granite/granite-8b-code-instruct-128k - GGUF

This repo contains GGUF format model files for [ibm-granite/granite-8b-code-instruct-128k](https://huggingface.co/ibm-granite/granite-8b-code-instruct-128k).

The files were quantized using machines provided by [TensorBlock](https://tensorblock.co/), and they are compatible with llama.cpp as of [commit b4011](https://github.com/ggerganov/llama.cpp/commit/a6744e43e80f4be6398fc7733a01642c846dce1d).

## Prompt template

```
System:
{system_prompt}

Question:
{prompt}

Answer:
```

## Model file specification

| Filename | Quant type | File Size | Description |
| -------- | ---------- | --------- | ----------- |
| [granite-8b-code-instruct-128k-Q2_K.gguf](https://huggingface.co/tensorblock/granite-8b-code-instruct-128k-GGUF/tree/main/granite-8b-code-instruct-128k-Q2_K.gguf) | Q2_K | 2.852 GB | smallest, significant quality loss - not recommended for most purposes |
| [granite-8b-code-instruct-128k-Q3_K_S.gguf](https://huggingface.co/tensorblock/granite-8b-code-instruct-128k-GGUF/tree/main/granite-8b-code-instruct-128k-Q3_K_S.gguf) | Q3_K_S | 3.304 GB | very small, high quality loss |
| [granite-8b-code-instruct-128k-Q3_K_M.gguf](https://huggingface.co/tensorblock/granite-8b-code-instruct-128k-GGUF/tree/main/granite-8b-code-instruct-128k-Q3_K_M.gguf) | Q3_K_M | 3.674 GB | very small, high quality loss |
| [granite-8b-code-instruct-128k-Q3_K_L.gguf](https://huggingface.co/tensorblock/granite-8b-code-instruct-128k-GGUF/tree/main/granite-8b-code-instruct-128k-Q3_K_L.gguf) | Q3_K_L | 3.993 GB | small, substantial quality loss |
| [granite-8b-code-instruct-128k-Q4_0.gguf](https://huggingface.co/tensorblock/granite-8b-code-instruct-128k-GGUF/tree/main/granite-8b-code-instruct-128k-Q4_0.gguf) | Q4_0 | 4.276 GB | legacy; small, very high quality loss - prefer using Q3_K_M |
| [granite-8b-code-instruct-128k-Q4_K_S.gguf](https://huggingface.co/tensorblock/granite-8b-code-instruct-128k-GGUF/tree/main/granite-8b-code-instruct-128k-Q4_K_S.gguf) | Q4_K_S | 4.305 GB | small, greater quality loss |
| [granite-8b-code-instruct-128k-Q4_K_M.gguf](https://huggingface.co/tensorblock/granite-8b-code-instruct-128k-GGUF/tree/main/granite-8b-code-instruct-128k-Q4_K_M.gguf) | Q4_K_M | 4.548 GB | medium, balanced quality - recommended |
| [granite-8b-code-instruct-128k-Q5_0.gguf](https://huggingface.co/tensorblock/granite-8b-code-instruct-128k-GGUF/tree/main/granite-8b-code-instruct-128k-Q5_0.gguf) | Q5_0 | 5.190 GB | legacy; medium, balanced quality - prefer using Q4_K_M |
| [granite-8b-code-instruct-128k-Q5_K_S.gguf](https://huggingface.co/tensorblock/granite-8b-code-instruct-128k-GGUF/tree/main/granite-8b-code-instruct-128k-Q5_K_S.gguf) | Q5_K_S | 5.190 GB | large, low quality loss - recommended |
| [granite-8b-code-instruct-128k-Q5_K_M.gguf](https://huggingface.co/tensorblock/granite-8b-code-instruct-128k-GGUF/tree/main/granite-8b-code-instruct-128k-Q5_K_M.gguf) | Q5_K_M | 5.330 GB | large, very low quality loss - recommended |
| [granite-8b-code-instruct-128k-Q6_K.gguf](https://huggingface.co/tensorblock/granite-8b-code-instruct-128k-GGUF/tree/main/granite-8b-code-instruct-128k-Q6_K.gguf) | Q6_K | 6.161 GB | very large, extremely low quality loss |
| [granite-8b-code-instruct-128k-Q8_0.gguf](https://huggingface.co/tensorblock/granite-8b-code-instruct-128k-GGUF/tree/main/granite-8b-code-instruct-128k-Q8_0.gguf) | Q8_0 | 7.977 GB | very large, extremely low quality loss - not recommended |
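Once downloaded, any of these files can be run directly with llama.cpp. As a minimal sketch (the Q4_K_M file, context size, and system prompt below are only illustrative; flags reflect recent llama.cpp builds and may differ in yours), the prompt template above maps onto a command like:

```shell
# Hypothetical invocation: assumes llama.cpp is built and the Q4_K_M file
# sits in the current directory. --escape expands the \n sequences so the
# rendered prompt matches the template documented above.
./llama-cli -m granite-8b-code-instruct-128k-Q4_K_M.gguf \
  -c 4096 --escape \
  -p "System:\nYou are a helpful coding assistant.\n\nQuestion:\nWrite a Python function that checks whether a string is a palindrome.\n\nAnswer:\n"
```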
## Downloading instructions

### Command line

First, install the Hugging Face CLI:

```shell
pip install -U "huggingface_hub[cli]"
```

Then, download an individual model file to a local directory:

```shell
huggingface-cli download tensorblock/granite-8b-code-instruct-128k-GGUF --include "granite-8b-code-instruct-128k-Q2_K.gguf" --local-dir MY_LOCAL_DIR
```

If you want to download multiple model files matching a pattern (e.g., `*Q4_K*gguf`), you can try:

```shell
huggingface-cli download tensorblock/granite-8b-code-instruct-128k-GGUF --local-dir MY_LOCAL_DIR --local-dir-use-symlinks False --include='*Q4_K*gguf'
```
diff --git a/granite-8b-code-instruct-128k-Q2_K.gguf b/granite-8b-code-instruct-128k-Q2_K.gguf
new file mode 100644
index 0000000..35ff928
--- /dev/null
+++ b/granite-8b-code-instruct-128k-Q2_K.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:15e2417c0c8e2e515aed0ef0ce0a5cf21b6fd81c023d17c993ab6c59e4cf8061
+size 3062071168
diff --git a/granite-8b-code-instruct-128k-Q3_K_L.gguf b/granite-8b-code-instruct-128k-Q3_K_L.gguf
new file mode 100644
index 0000000..63944e3
--- /dev/null
+++ b/granite-8b-code-instruct-128k-Q3_K_L.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1382114244aa9dea149436cae98eeb91e707475dc364f61f519b22d7faa2f76b
+size 4287725440
diff --git a/granite-8b-code-instruct-128k-Q3_K_M.gguf b/granite-8b-code-instruct-128k-Q3_K_M.gguf
new file mode 100644
index 0000000..affeb25
--- /dev/null
+++ b/granite-8b-code-instruct-128k-Q3_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f3b8d8cdba311586998eadbfb0e1dd924d69d63dbfc27994360b4031d235379c
+size 3944841088
diff --git a/granite-8b-code-instruct-128k-Q3_K_S.gguf b/granite-8b-code-instruct-128k-Q3_K_S.gguf
new file mode 100644
index 0000000..6a52abe
--- /dev/null
+++ b/granite-8b-code-instruct-128k-Q3_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f598a0fbf47c7c45a3aa05b113d77ad0f5b3a68642c6135c5e987ec9e5023427
+size 3548086144
diff --git a/granite-8b-code-instruct-128k-Q4_0.gguf b/granite-8b-code-instruct-128k-Q4_0.gguf
new file mode 100644
index 0000000..4eb366c
--- /dev/null
+++ b/granite-8b-code-instruct-128k-Q4_0.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f8d51547840cc28a6d8a5c261769bff2767d6c515de5d1c1e2ebfd64915057cb
+size 4590894976
diff --git a/granite-8b-code-instruct-128k-Q4_K_M.gguf b/granite-8b-code-instruct-128k-Q4_K_M.gguf
new file mode 100644
index 0000000..d83cc06
--- /dev/null
+++ b/granite-8b-code-instruct-128k-Q4_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:04680b9c3770f0e76b3399613cb16054211d92d9d43ce395fc94919cfcf638b1
+size 4882857856
diff --git a/granite-8b-code-instruct-128k-Q4_K_S.gguf b/granite-8b-code-instruct-128k-Q4_K_S.gguf
new file mode 100644
index 0000000..c073c88
--- /dev/null
+++ b/granite-8b-code-instruct-128k-Q4_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a759447e056556dbe0ff721194677b2d91db8289f015f54724964af56ed51cc4
+size 4622352256
diff --git a/granite-8b-code-instruct-128k-Q5_0.gguf b/granite-8b-code-instruct-128k-Q5_0.gguf
new file mode 100644
index 0000000..8837610
--- /dev/null
+++ b/granite-8b-code-instruct-128k-Q5_0.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e6cea2eec37b73a126536989b3d1f11e2c8c25af6bc6ea6ff3e971e646511748
+size 5572362112
diff --git a/granite-8b-code-instruct-128k-Q5_K_M.gguf b/granite-8b-code-instruct-128k-Q5_K_M.gguf
new file mode 100644
index 0000000..820f06b
--- /dev/null
+++ b/granite-8b-code-instruct-128k-Q5_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:37c4493d6f184275e654ebdd2ea5a0e8d93dcd2af5f149cd79270eb5d8c381d0
+size 5722767232
diff --git a/granite-8b-code-instruct-128k-Q5_K_S.gguf b/granite-8b-code-instruct-128k-Q5_K_S.gguf
new file mode 100644
index 0000000..5edd30b
--- /dev/null
+++ b/granite-8b-code-instruct-128k-Q5_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:979e66fd59ebec2f8d86dc7c7a08ff636b9608b7169c469c925ca2358e3b0cda
+size 5572362112
diff --git a/granite-8b-code-instruct-128k-Q6_K.gguf b/granite-8b-code-instruct-128k-Q6_K.gguf
new file mode 100644
index 0000000..76f25d2
--- /dev/null
+++ b/granite-8b-code-instruct-128k-Q6_K.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:109497efa8560c94aaf68fffb599918258e4b246ffba97020a91719f0c644897
+size 6615170944
diff --git a/granite-8b-code-instruct-128k-Q8_0.gguf b/granite-8b-code-instruct-128k-Q8_0.gguf
new file mode 100644
index 0000000..5e10ea3
--- /dev/null
+++ b/granite-8b-code-instruct-128k-Q8_0.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4e301355a06a653fd7b27508abfe91abfce707c479bda0b7b21c345bafc0f370
+size 8565522304
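Each LFS pointer above records the SHA-256 digest and byte size of the corresponding GGUF blob, so a download can be verified against it. As a minimal sketch (assuming a POSIX shell with `sha256sum` available, and using the Q2_K file as the example):

```shell
# Expected digest copied from the Q2_K LFS pointer above; the local path is hypothetical.
echo "15e2417c0c8e2e515aed0ef0ce0a5cf21b6fd81c023d17c993ab6c59e4cf8061  granite-8b-code-instruct-128k-Q2_K.gguf" | sha256sum --check
```

A matching file prints `granite-8b-code-instruct-128k-Q2_K.gguf: OK`; any other output suggests a truncated or corrupted download.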