From b7e1ba9869b310e13a0487104ff9728439b87e08 Mon Sep 17 00:00:00 2001
From: ModelHub XC
Date: Sun, 12 Apr 2026 08:36:08 +0800
Subject: [PATCH] Initialize project; model provided by the ModelHub XC community
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Model: mradermacher/aria-1b-chat-GGUF
Source: Original Platform
---
 .gitattributes           | 47 ++++++++++++++++++++++++
 README.md                | 79 ++++++++++++++++++++++++++++++++++++++++
 aria-1b-chat.IQ4_XS.gguf |  3 ++
 aria-1b-chat.Q2_K.gguf   |  3 ++
 aria-1b-chat.Q3_K_L.gguf |  3 ++
 aria-1b-chat.Q3_K_M.gguf |  3 ++
 aria-1b-chat.Q3_K_S.gguf |  3 ++
 aria-1b-chat.Q4_K_M.gguf |  3 ++
 aria-1b-chat.Q4_K_S.gguf |  3 ++
 aria-1b-chat.Q5_K_M.gguf |  3 ++
 aria-1b-chat.Q5_K_S.gguf |  3 ++
 aria-1b-chat.Q6_K.gguf   |  3 ++
 aria-1b-chat.Q8_0.gguf   |  3 ++
 aria-1b-chat.f16.gguf    |  3 ++
 14 files changed, 162 insertions(+)
 create mode 100644 .gitattributes
 create mode 100644 README.md
 create mode 100644 aria-1b-chat.IQ4_XS.gguf
 create mode 100644 aria-1b-chat.Q2_K.gguf
 create mode 100644 aria-1b-chat.Q3_K_L.gguf
 create mode 100644 aria-1b-chat.Q3_K_M.gguf
 create mode 100644 aria-1b-chat.Q3_K_S.gguf
 create mode 100644 aria-1b-chat.Q4_K_M.gguf
 create mode 100644 aria-1b-chat.Q4_K_S.gguf
 create mode 100644 aria-1b-chat.Q5_K_M.gguf
 create mode 100644 aria-1b-chat.Q5_K_S.gguf
 create mode 100644 aria-1b-chat.Q6_K.gguf
 create mode 100644 aria-1b-chat.Q8_0.gguf
 create mode 100644 aria-1b-chat.f16.gguf

diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..8e62a08
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,47 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+aria-1b-chat.IQ4_XS.gguf filter=lfs diff=lfs merge=lfs -text
+aria-1b-chat.Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
+aria-1b-chat.Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+aria-1b-chat.Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+aria-1b-chat.Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+aria-1b-chat.Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+aria-1b-chat.Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+aria-1b-chat.Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+aria-1b-chat.Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+aria-1b-chat.Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
+aria-1b-chat.Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
+aria-1b-chat.f16.gguf filter=lfs diff=lfs merge=lfs -text
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..93836c3
--- /dev/null
+++ b/README.md
@@ -0,0 +1,79 @@
+---
+base_model: dkumar15/aria-1b-chat
+language:
+- en
+library_name: transformers
+license: apache-2.0
+mradermacher:
+  readme_rev: 1
+quantized_by: mradermacher
+tags:
+- llama
+- causal-lm
+- from-scratch
+- dpo
+- chat
+- text-generation
+---
+## About
+
+static quants of https://huggingface.co/dkumar15/aria-1b-chat
+
+***For a convenient overview and download list, visit our [model page for this model](https://hf.tst.eu/model#aria-1b-chat-GGUF).***
+
+weighted/imatrix quants are available at https://huggingface.co/mradermacher/aria-1b-chat-i1-GGUF
+
+## Usage
+
+If you are unsure how to use GGUF files, refer to one of [TheBloke's
+READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for
+more details, including on how to concatenate multi-part files.
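+
+As a minimal sketch (the packages, the chosen quant file, the context size, and the sampling
+settings below are illustrative assumptions, not requirements of this repository), one way to
+download and run one of the quants listed further down is with `huggingface_hub` and
+`llama-cpp-python`:
+
+```python
+from huggingface_hub import hf_hub_download
+from llama_cpp import Llama
+
+# Fetch a single quant file from this repository (cached locally by huggingface_hub).
+model_path = hf_hub_download(
+    repo_id="mradermacher/aria-1b-chat-GGUF",
+    filename="aria-1b-chat.Q4_K_M.gguf",
+)
+
+# Load the GGUF file; n_ctx=2048 is an assumed context length for this example.
+llm = Llama(model_path=model_path, n_ctx=2048)
+
+# create_chat_completion applies the chat template stored in the GGUF metadata when one is
+# present; otherwise llama-cpp-python falls back to a default template.
+out = llm.create_chat_completion(
+    messages=[{"role": "user", "content": "Explain GGUF quantization in one sentence."}],
+    max_tokens=128,
+)
+print(out["choices"][0]["message"]["content"])
+```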
+
+## Provided Quants
+
+(sorted by size, not necessarily quality. IQ-quants are often preferable over similarly sized non-IQ quants)
+
+| Link | Type | Size/GB | Notes |
+|:-----|:-----|--------:|:------|
+| [GGUF](https://huggingface.co/mradermacher/aria-1b-chat-GGUF/resolve/main/aria-1b-chat.Q2_K.gguf) | Q2_K | 0.6 |  |
+| [GGUF](https://huggingface.co/mradermacher/aria-1b-chat-GGUF/resolve/main/aria-1b-chat.Q3_K_S.gguf) | Q3_K_S | 0.6 |  |
+| [GGUF](https://huggingface.co/mradermacher/aria-1b-chat-GGUF/resolve/main/aria-1b-chat.Q3_K_M.gguf) | Q3_K_M | 0.7 | lower quality |
+| [GGUF](https://huggingface.co/mradermacher/aria-1b-chat-GGUF/resolve/main/aria-1b-chat.Q3_K_L.gguf) | Q3_K_L | 0.7 |  |
+| [GGUF](https://huggingface.co/mradermacher/aria-1b-chat-GGUF/resolve/main/aria-1b-chat.IQ4_XS.gguf) | IQ4_XS | 0.7 |  |
+| [GGUF](https://huggingface.co/mradermacher/aria-1b-chat-GGUF/resolve/main/aria-1b-chat.Q4_K_S.gguf) | Q4_K_S | 0.8 | fast, recommended |
+| [GGUF](https://huggingface.co/mradermacher/aria-1b-chat-GGUF/resolve/main/aria-1b-chat.Q4_K_M.gguf) | Q4_K_M | 0.8 | fast, recommended |
+| [GGUF](https://huggingface.co/mradermacher/aria-1b-chat-GGUF/resolve/main/aria-1b-chat.Q5_K_S.gguf) | Q5_K_S | 0.9 |  |
+| [GGUF](https://huggingface.co/mradermacher/aria-1b-chat-GGUF/resolve/main/aria-1b-chat.Q5_K_M.gguf) | Q5_K_M | 0.9 |  |
+| [GGUF](https://huggingface.co/mradermacher/aria-1b-chat-GGUF/resolve/main/aria-1b-chat.Q6_K.gguf) | Q6_K | 1.1 | very good quality |
+| [GGUF](https://huggingface.co/mradermacher/aria-1b-chat-GGUF/resolve/main/aria-1b-chat.Q8_0.gguf) | Q8_0 | 1.3 | fast, best quality |
+| [GGUF](https://huggingface.co/mradermacher/aria-1b-chat-GGUF/resolve/main/aria-1b-chat.f16.gguf) | f16 | 2.3 | 16 bpw, overkill |
+
+Here is a handy graph by ikawrakow comparing some lower-quality quant
+types (lower is better):
+
+![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png)
+
+And here are Artefact2's thoughts on the matter:
+https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9
+
+## FAQ / Model Request
+
+See https://huggingface.co/mradermacher/model_requests for some answers to
+questions you might have and/or if you want some other model quantized.
+
+## Thanks
+
+I thank my company, [nethype GmbH](https://www.nethype.de/), for letting
+me use its servers and providing upgrades to my workstation to enable
+this work in my free time.
+
diff --git a/aria-1b-chat.IQ4_XS.gguf b/aria-1b-chat.IQ4_XS.gguf
new file mode 100644
index 0000000..0a8c3a5
--- /dev/null
+++ b/aria-1b-chat.IQ4_XS.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:270f7a31b519ba65755fc581afc8990ffc2a0ada37e685a49c9182a010598ec9
+size 623048832
diff --git a/aria-1b-chat.Q2_K.gguf b/aria-1b-chat.Q2_K.gguf
new file mode 100644
index 0000000..a34a764
--- /dev/null
+++ b/aria-1b-chat.Q2_K.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b39f581144fea62331dc9dae57fa08e3378f829c4c1a2999279576f39ca4781a
+size 469078944
diff --git a/aria-1b-chat.Q3_K_L.gguf b/aria-1b-chat.Q3_K_L.gguf
new file mode 100644
index 0000000..72b8a2d
--- /dev/null
+++ b/aria-1b-chat.Q3_K_L.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d71bf4526f42955e174cf3d1b2c77777405c350e2519318825690e83bafc1585
+size 610993696
diff --git a/aria-1b-chat.Q3_K_M.gguf b/aria-1b-chat.Q3_K_M.gguf
new file mode 100644
index 0000000..8b43cc1
--- /dev/null
+++ b/aria-1b-chat.Q3_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f415e25b621d3e5f2d5a8e6e6cc88090720efc7ba77318049790f2a6c8592c7b
+size 582043168
diff --git a/aria-1b-chat.Q3_K_S.gguf b/aria-1b-chat.Q3_K_S.gguf
new file mode 100644
index 0000000..4f40061
--- /dev/null
+++ b/aria-1b-chat.Q3_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dce6937361ffe0ace02c6589c4fc906da47ab0f18c3c71bc34a49a3831482a13
+size 534758944
diff --git a/aria-1b-chat.Q4_K_M.gguf b/aria-1b-chat.Q4_K_M.gguf
new file mode 100644
index 0000000..848d3ca
--- /dev/null
+++ b/aria-1b-chat.Q4_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9dd287537ccb63dc78b3f68a7468e4601da46b40d03e8929c5a7166d3cce9c92
+size 715946304
diff --git a/aria-1b-chat.Q4_K_S.gguf b/aria-1b-chat.Q4_K_S.gguf
new file mode 100644
index 0000000..9bb3d6f
--- /dev/null
+++ b/aria-1b-chat.Q4_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c8cbc967d705cf5f51add6fc58f38f4166e52774bfdb465cc7e815489d1b5b73
+size 672905536
diff --git a/aria-1b-chat.Q5_K_M.gguf b/aria-1b-chat.Q5_K_M.gguf
new file mode 100644
index 0000000..11136f5
--- /dev/null
+++ b/aria-1b-chat.Q5_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5c1f637c06832718add9e77b2cb61d65aef66ff02be3c35194e8303be0a3c1da
+size 822115392
diff --git a/aria-1b-chat.Q5_K_S.gguf b/aria-1b-chat.Q5_K_S.gguf
new file mode 100644
index 0000000..dc85fd2
--- /dev/null
+++ b/aria-1b-chat.Q5_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a79eaaf457559fb3eabc0bff20101e76aa888f9dd8c1a5bacc32ad3f6629600c
+size 785497152
diff --git a/aria-1b-chat.Q6_K.gguf b/aria-1b-chat.Q6_K.gguf
new file mode 100644
index 0000000..09ec63e
--- /dev/null
+++ b/aria-1b-chat.Q6_K.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f4502a535e9dcd48d145b002d8f60f74d4b13c5564a18ee5ce8411b925126c4
+size 968208256
diff --git a/aria-1b-chat.Q8_0.gguf b/aria-1b-chat.Q8_0.gguf
new file mode 100644
index 0000000..74d7850
--- /dev/null
+++ b/aria-1b-chat.Q8_0.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7f16612f4d55ccaecdfb27c63c1b6dcb22f8e8cda92d7aa49528b8fe79529659
+size 1175943936
diff --git a/aria-1b-chat.f16.gguf b/aria-1b-chat.f16.gguf
new file mode 100644
index 0000000..6657c5a
--- /dev/null
+++ b/aria-1b-chat.f16.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fc93848af18a06bc92917908fba35abdc3d39abf5084d16f9129ed1f04c45462
+size 2212571136