From cb4864ae17b9b51a44538c0072a309912b918107 Mon Sep 17 00:00:00 2001 From: ModelHub XC Date: Wed, 22 Apr 2026 01:09:42 +0800 Subject: [PATCH] =?UTF-8?q?=E5=88=9D=E5=A7=8B=E5=8C=96=E9=A1=B9=E7=9B=AE?= =?UTF-8?q?=EF=BC=8C=E7=94=B1ModelHub=20XC=E7=A4=BE=E5=8C=BA=E6=8F=90?= =?UTF-8?q?=E4=BE=9B=E6=A8=A1=E5=9E=8B?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Model: afrideva/no-prompt-1.3b-GGUF Source: Original Platform --- .gitattributes | 42 +++++++++++++++++++++++ README.md | 69 ++++++++++++++++++++++++++++++++++++++ no-prompt-1.3b.fp16.gguf | 3 ++ no-prompt-1.3b.q2_k.gguf | 3 ++ no-prompt-1.3b.q3_k_m.gguf | 3 ++ no-prompt-1.3b.q4_k_m.gguf | 3 ++ no-prompt-1.3b.q5_k_m.gguf | 3 ++ no-prompt-1.3b.q6_k.gguf | 3 ++ no-prompt-1.3b.q8_0.gguf | 3 ++ 9 files changed, 132 insertions(+) create mode 100644 .gitattributes create mode 100644 README.md create mode 100644 no-prompt-1.3b.fp16.gguf create mode 100644 no-prompt-1.3b.q2_k.gguf create mode 100644 no-prompt-1.3b.q3_k_m.gguf create mode 100644 no-prompt-1.3b.q4_k_m.gguf create mode 100644 no-prompt-1.3b.q5_k_m.gguf create mode 100644 no-prompt-1.3b.q6_k.gguf create mode 100644 no-prompt-1.3b.q8_0.gguf diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..dc950e3 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,42 @@ +*.7z filter=lfs diff=lfs merge=lfs -text +*.arrow filter=lfs diff=lfs merge=lfs -text +*.bin filter=lfs diff=lfs merge=lfs -text +*.bz2 filter=lfs diff=lfs merge=lfs -text +*.ckpt filter=lfs diff=lfs merge=lfs -text +*.ftz filter=lfs diff=lfs merge=lfs -text +*.gz filter=lfs diff=lfs merge=lfs -text +*.h5 filter=lfs diff=lfs merge=lfs -text +*.joblib filter=lfs diff=lfs merge=lfs -text +*.lfs.* filter=lfs diff=lfs merge=lfs -text +*.mlmodel filter=lfs diff=lfs merge=lfs -text +*.model filter=lfs diff=lfs merge=lfs -text +*.msgpack filter=lfs diff=lfs merge=lfs -text +*.npy filter=lfs diff=lfs merge=lfs -text +*.npz filter=lfs diff=lfs 
merge=lfs -text +*.onnx filter=lfs diff=lfs merge=lfs -text +*.ot filter=lfs diff=lfs merge=lfs -text +*.parquet filter=lfs diff=lfs merge=lfs -text +*.pb filter=lfs diff=lfs merge=lfs -text +*.pickle filter=lfs diff=lfs merge=lfs -text +*.pkl filter=lfs diff=lfs merge=lfs -text +*.pt filter=lfs diff=lfs merge=lfs -text +*.pth filter=lfs diff=lfs merge=lfs -text +*.rar filter=lfs diff=lfs merge=lfs -text +*.safetensors filter=lfs diff=lfs merge=lfs -text +saved_model/**/* filter=lfs diff=lfs merge=lfs -text +*.tar.* filter=lfs diff=lfs merge=lfs -text +*.tar filter=lfs diff=lfs merge=lfs -text +*.tflite filter=lfs diff=lfs merge=lfs -text +*.tgz filter=lfs diff=lfs merge=lfs -text +*.wasm filter=lfs diff=lfs merge=lfs -text +*.xz filter=lfs diff=lfs merge=lfs -text +*.zip filter=lfs diff=lfs merge=lfs -text +*.zst filter=lfs diff=lfs merge=lfs -text +*tfevents* filter=lfs diff=lfs merge=lfs -text +no-prompt-1.3b.fp16.gguf filter=lfs diff=lfs merge=lfs -text +no-prompt-1.3b.q2_k.gguf filter=lfs diff=lfs merge=lfs -text +no-prompt-1.3b.q3_k_m.gguf filter=lfs diff=lfs merge=lfs -text +no-prompt-1.3b.q4_k_m.gguf filter=lfs diff=lfs merge=lfs -text +no-prompt-1.3b.q5_k_m.gguf filter=lfs diff=lfs merge=lfs -text +no-prompt-1.3b.q6_k.gguf filter=lfs diff=lfs merge=lfs -text +no-prompt-1.3b.q8_0.gguf filter=lfs diff=lfs merge=lfs -text diff --git a/README.md b/README.md new file mode 100644 index 0000000..a5624c8 --- /dev/null +++ b/README.md @@ -0,0 +1,69 @@ +--- +base_model: appvoid/no-prompt-1.3b +datasets: +- appvoid/no-prompt-15k +inference: false +language: +- en +license: apache-2.0 +model_creator: appvoid +model_name: no-prompt-1.3b +pipeline_tag: text-generation +quantized_by: afrideva +tags: +- gguf +- ggml +- quantized +- q2_k +- q3_k_m +- q4_k_m +- q5_k_m +- q6_k +- q8_0 +--- +# appvoid/no-prompt-1.3b-GGUF + +Quantized GGUF model files for [no-prompt-1.3b](https://huggingface.co/appvoid/no-prompt-1.3b) from [appvoid](https://huggingface.co/appvoid) + + +| Name 
| Quant method | Size | +| ---- | ---- | ---- | +| [no-prompt-1.3b.fp16.gguf](https://huggingface.co/afrideva/no-prompt-1.3b-GGUF/resolve/main/no-prompt-1.3b.fp16.gguf) | fp16 | 2.69 GB | +| [no-prompt-1.3b.q2_k.gguf](https://huggingface.co/afrideva/no-prompt-1.3b-GGUF/resolve/main/no-prompt-1.3b.q2_k.gguf) | q2_k | 631.52 MB | +| [no-prompt-1.3b.q3_k_m.gguf](https://huggingface.co/afrideva/no-prompt-1.3b-GGUF/resolve/main/no-prompt-1.3b.q3_k_m.gguf) | q3_k_m | 704.72 MB | +| [no-prompt-1.3b.q4_k_m.gguf](https://huggingface.co/afrideva/no-prompt-1.3b-GGUF/resolve/main/no-prompt-1.3b.q4_k_m.gguf) | q4_k_m | 873.27 MB | +| [no-prompt-1.3b.q5_k_m.gguf](https://huggingface.co/afrideva/no-prompt-1.3b-GGUF/resolve/main/no-prompt-1.3b.q5_k_m.gguf) | q5_k_m | 1.00 GB | +| [no-prompt-1.3b.q6_k.gguf](https://huggingface.co/afrideva/no-prompt-1.3b-GGUF/resolve/main/no-prompt-1.3b.q6_k.gguf) | q6_k | 1.17 GB | +| [no-prompt-1.3b.q8_0.gguf](https://huggingface.co/afrideva/no-prompt-1.3b-GGUF/resolve/main/no-prompt-1.3b.q8_0.gguf) | q8_0 | 1.43 GB | + + + +## Original Model Card: +![palmer](https://huggingface.co/appvoid/no-prompt-1.3b/resolve/main/_ccd1a5dd-2ddc-4d5a-8163-fd6d1b39f5f4.jpeg?download=true) +# no-prompt +### a sheared-llama-1.3b fine-tuning +This model uses a 1.3 billion parameter model as a base, further fine-tuned on the same data as palmer. It works pretty well and even surpasses the sota model on `hellaswag`. + +### evaluation +|Model| ARC_C| HellaSwag| PIQA| Winogrande| +|------|-----|-----------|------|-------------| +|tinyllama-2t| 0.2807| 0.5463| 0.7067| 0.5683| +|palmer-001 | 0.2807| 0.5524| 0.7106| 0.5896| +|sheared-1.3b| 0.2910| 0.5935| 0.7339| 0.5809| +|no-prompt-1.3b| 0.3157| **0.6022**| 0.7334| 0.5864| +|falcon-rw-1b-instruct-openorca (sota) | **0.3362**| 0.5997| **0.7394**| **0.6148**| + +This model was trained on less than 25% of the dataset, yet achieves performance competitive with the current sota on the open llm leaderboard. 
+ +### training +Training took ~5 P100 gpu hours. It was trained on 15,000 gpt-4 shuffled samples. no-prompt was fine-tuned using lower learning rates ensuring it keeps as much general knowledge as possible. + +### prompt +``` +no prompt +``` + +### limitations +Hallucinations are frequent, just as any transformer model this size. + +Buy Me A Coffee \ No newline at end of file diff --git a/no-prompt-1.3b.fp16.gguf b/no-prompt-1.3b.fp16.gguf new file mode 100644 index 0000000..3bbb4ea --- /dev/null +++ b/no-prompt-1.3b.fp16.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f76c57abf9b6bdabe3feae2ce0d4aed6ff12827a0de73ba0990d3f3804f384e6 +size 2692757408 diff --git a/no-prompt-1.3b.q2_k.gguf b/no-prompt-1.3b.q2_k.gguf new file mode 100644 index 0000000..f90ad53 --- /dev/null +++ b/no-prompt-1.3b.q2_k.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:94aef392a4bb7045e09048dbbbb60a56a464f65f2c929dde0612654892b48b9d +size 631515072 diff --git a/no-prompt-1.3b.q3_k_m.gguf b/no-prompt-1.3b.q3_k_m.gguf new file mode 100644 index 0000000..3e5829c --- /dev/null +++ b/no-prompt-1.3b.q3_k_m.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e3141ee2f5371184c3a3ab4e945ca824484581882f4b65530ccc6cf1e87f1155 +size 704722880 diff --git a/no-prompt-1.3b.q4_k_m.gguf b/no-prompt-1.3b.q4_k_m.gguf new file mode 100644 index 0000000..2f55a3b --- /dev/null +++ b/no-prompt-1.3b.q4_k_m.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0c9ce2be40e6823db3b92e1cc0f92ee72e4ebe660e0e698e76f77d74fd2d37b2 +size 873269184 diff --git a/no-prompt-1.3b.q5_k_m.gguf b/no-prompt-1.3b.q5_k_m.gguf new file mode 100644 index 0000000..08329e8 --- /dev/null +++ b/no-prompt-1.3b.q5_k_m.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:19b4534d996c0d638430e938da0697f0d34954c7e85e7a48d7138fb5173fc7a5 +size 1001588672 diff --git a/no-prompt-1.3b.q6_k.gguf b/no-prompt-1.3b.q6_k.gguf 
new file mode 100644 index 0000000..b34ec7d --- /dev/null +++ b/no-prompt-1.3b.q6_k.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:46c2bfcac644ab9999a2dfb07ec0500698e10d12d84fcd5ee6b8be698337edb1 +size 1171216320 diff --git a/no-prompt-1.3b.q8_0.gguf b/no-prompt-1.3b.q8_0.gguf new file mode 100644 index 0000000..b869eb9 --- /dev/null +++ b/no-prompt-1.3b.q8_0.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3057cbcbb095192754b1fa04e83a15cf79a4224ed5b29bdc5e3fbbcd39e63260 +size 1431517120