From be5a459dfb00e94c3d42f0400e2da8bf69066c78 Mon Sep 17 00:00:00 2001
From: ModelHub XC
Date: Wed, 6 May 2026 12:50:48 +0800
Subject: [PATCH] Initialize project; model provided by the ModelHub XC
 community
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Model: PJRM/stablelm-zephyr-3b-Heretic-Q4_0-GGUF
Source: Original Platform
---
 .gitattributes                       | 36 ++++++++++++++
 Chat Template                        | 12 +++++
 README.md                            | 70 ++++++++++++++++++++++++++++
 stablelm-zephyr-3b-heretic-q4_0.gguf |  3 ++
 4 files changed, 121 insertions(+)
 create mode 100644 .gitattributes
 create mode 100644 Chat Template
 create mode 100644 README.md
 create mode 100644 stablelm-zephyr-3b-heretic-q4_0.gguf

diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..811c786
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,36 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+stablelm-zephyr-3b-heretic-q4_0.gguf filter=lfs diff=lfs merge=lfs -text
diff --git a/Chat Template b/Chat Template
new file mode 100644
index 0000000..7ed1ca8
--- /dev/null
+++ b/Chat Template
@@ -0,0 +1,12 @@
+{%- for message in messages %}
+{%- if message['role'] == 'user' %}
+    {{- '<|user|>' + message['content'] + eos_token }}
+{%- elif message['role'] == 'system' %}
+    {{- '<|system|>' + message['content'] + eos_token }}
+{%- elif message['role'] == 'assistant' %}
+    {{- '<|assistant|>' + message['content'] + eos_token }}
+{%- endif %}
+{%- if loop.last and add_generation_prompt %}
+    {{- '<|assistant|>' }}
+{%- endif %}
+{%- endfor %}
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..0c237f7
--- /dev/null
+++ b/README.md
@@ -0,0 +1,70 @@
+---
+license: other
+license_name: license.md
+license_link: https://huggingface.co/stabilityai/stablelm-zephyr-3b/blob/main/LICENSE.md
+base_model: ChiKoi7/stablelm-zephyr-3b-Heretic
+tags:
+- Zephyr
+- stablelm
+- 3b
+- stabilityai
+- Heretic
+- Uncensored
+- Abliterated
+- llama-cpp
+- gguf-my-repo
+datasets:
+- HuggingFaceH4/ultrachat_200k
+- HuggingFaceH4/ultrafeedback_binarized
+- meta-math/MetaMathQA
+- WizardLMTeam/WizardLM_evol_instruct_V2_196k
+- Intel/orca_dpo_pairs
+language:
+- en
+pipeline_tag: text-generation
+library_name: transformers
+---
+
+# PJRM/stablelm-zephyr-3b-Heretic-Q4_0-GGUF
+This model was converted to GGUF format from [`ChiKoi7/stablelm-zephyr-3b-Heretic`](https://huggingface.co/ChiKoi7/stablelm-zephyr-3b-Heretic) using llama.cpp via ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space.
+Refer to the [original model card](https://huggingface.co/ChiKoi7/stablelm-zephyr-3b-Heretic) for more details on the model.
+
+## Use with llama.cpp
+Install llama.cpp through brew (works on Mac and Linux):
+
+```bash
+brew install llama.cpp
+```
+
+Invoke the llama.cpp server or the CLI.
+
+### CLI:
+```bash
+llama-cli --hf-repo PJRM/stablelm-zephyr-3b-Heretic-Q4_0-GGUF --hf-file stablelm-zephyr-3b-heretic-q4_0.gguf -p "The meaning of life and the universe is"
+```
+
+### Server:
+```bash
+llama-server --hf-repo PJRM/stablelm-zephyr-3b-Heretic-Q4_0-GGUF --hf-file stablelm-zephyr-3b-heretic-q4_0.gguf -c 2048
+```
+
+Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the llama.cpp repository.
+
+Step 1: Clone llama.cpp from GitHub.
+```
+git clone https://github.com/ggerganov/llama.cpp
+```
+
+Step 2: Move into the llama.cpp folder and build it with the `LLAMA_CURL=1` flag, along with any hardware-specific flags (for example, `LLAMA_CUDA=1` for NVIDIA GPUs on Linux).
+```
+cd llama.cpp && LLAMA_CURL=1 make
+```
+
+Step 3: Run inference through the main binary.
+```
+./llama-cli --hf-repo PJRM/stablelm-zephyr-3b-Heretic-Q4_0-GGUF --hf-file stablelm-zephyr-3b-heretic-q4_0.gguf -p "The meaning of life and the universe is"
+```
+or
+```
+./llama-server --hf-repo PJRM/stablelm-zephyr-3b-Heretic-Q4_0-GGUF --hf-file stablelm-zephyr-3b-heretic-q4_0.gguf -c 2048
+```
diff --git a/stablelm-zephyr-3b-heretic-q4_0.gguf b/stablelm-zephyr-3b-heretic-q4_0.gguf
new file mode 100644
index 0000000..c4c5953
--- /dev/null
+++ b/stablelm-zephyr-3b-heretic-q4_0.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5190da17cad8c5024248254ce85c1ec3fe55a3a476524dd8eef71fa3544ef57d
+size 1608573056
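
The "Chat Template" file added above is a plain Jinja2 template in the Zephyr style. As a quick illustration (not part of the upstream instructions), here is a minimal sketch of rendering it by hand; the `<|endoftext|>` EOS token is an assumption carried over from the base stabilityai/stablelm-zephyr-3b tokenizer:

```python
# Minimal sketch: render the repo's "Chat Template" file outside any framework.
# Assumption: eos_token is "<|endoftext|>", as in the base
# stabilityai/stablelm-zephyr-3b tokenizer.
from pathlib import Path

from jinja2 import Template

template = Template(Path("Chat Template").read_text())
prompt = template.render(
    messages=[{"role": "user", "content": "Hello!"}],
    eos_token="<|endoftext|>",
    add_generation_prompt=True,
)
print(prompt)  # -> <|user|>Hello!<|endoftext|><|assistant|>
```

Because every tag in the template trims surrounding whitespace, the rendered prompt is a single line: each turn is `<|role|>` + content + EOS, with a trailing `<|assistant|>` appended when `add_generation_prompt` is set.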
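
The same prompt format can be used from Python through the llama-cpp-python bindings. This is a hedged sketch, assuming the GGUF file has already been downloaded into the working directory; it is an alternative to the llama.cpp CLI steps in the README, not a replacement for them:

```python
# Sketch using llama-cpp-python (pip install llama-cpp-python).
# Assumes stablelm-zephyr-3b-heretic-q4_0.gguf is in the current directory,
# e.g. fetched with:
#   huggingface-cli download PJRM/stablelm-zephyr-3b-Heretic-Q4_0-GGUF \
#       stablelm-zephyr-3b-heretic-q4_0.gguf --local-dir .
from llama_cpp import Llama

llm = Llama(model_path="stablelm-zephyr-3b-heretic-q4_0.gguf", n_ctx=2048)

# The prompt follows the Zephyr format produced by the bundled chat template.
out = llm(
    "<|user|>What is 4-bit quantization in one sentence?<|endoftext|><|assistant|>",
    max_tokens=128,
    stop=["<|endoftext|>"],
)
print(out["choices"][0]["text"])
```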