From 83bb577cbe89e561cabb1f953e1bc9979cfb7329 Mon Sep 17 00:00:00 2001 From: ModelHub XC Date: Wed, 6 May 2026 03:24:44 +0800 Subject: [PATCH] =?UTF-8?q?=E5=88=9D=E5=A7=8B=E5=8C=96=E9=A1=B9=E7=9B=AE?= =?UTF-8?q?=EF=BC=8C=E7=94=B1ModelHub=20XC=E7=A4=BE=E5=8C=BA=E6=8F=90?= =?UTF-8?q?=E4=BE=9B=E6=A8=A1=E5=9E=8B?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Model: Flexan/FoxyzGPT-X1.1-1.7B-GGUF Source: Original Platform --- .gitattributes | 49 ++++++++++++++++++++ FoxyzGPT-X1.1-1.7B.IQ3_M.gguf | 3 ++ FoxyzGPT-X1.1-1.7B.IQ3_S.gguf | 3 ++ FoxyzGPT-X1.1-1.7B.IQ4_XS.gguf | 3 ++ FoxyzGPT-X1.1-1.7B.Q2_K.gguf | 3 ++ FoxyzGPT-X1.1-1.7B.Q3_K_L.gguf | 3 ++ FoxyzGPT-X1.1-1.7B.Q3_K_M.gguf | 3 ++ FoxyzGPT-X1.1-1.7B.Q3_K_S.gguf | 3 ++ FoxyzGPT-X1.1-1.7B.Q4_K_M.gguf | 3 ++ FoxyzGPT-X1.1-1.7B.Q4_K_S.gguf | 3 ++ FoxyzGPT-X1.1-1.7B.Q5_K_M.gguf | 3 ++ FoxyzGPT-X1.1-1.7B.Q5_K_S.gguf | 3 ++ FoxyzGPT-X1.1-1.7B.Q6_K.gguf | 3 ++ FoxyzGPT-X1.1-1.7B.Q8_0.gguf | 3 ++ FoxyzGPT-X1.1-1.7B.f16.gguf | 3 ++ README.md | 84 ++++++++++++++++++++++++++++++++++ 16 files changed, 175 insertions(+) create mode 100644 .gitattributes create mode 100644 FoxyzGPT-X1.1-1.7B.IQ3_M.gguf create mode 100644 FoxyzGPT-X1.1-1.7B.IQ3_S.gguf create mode 100644 FoxyzGPT-X1.1-1.7B.IQ4_XS.gguf create mode 100644 FoxyzGPT-X1.1-1.7B.Q2_K.gguf create mode 100644 FoxyzGPT-X1.1-1.7B.Q3_K_L.gguf create mode 100644 FoxyzGPT-X1.1-1.7B.Q3_K_M.gguf create mode 100644 FoxyzGPT-X1.1-1.7B.Q3_K_S.gguf create mode 100644 FoxyzGPT-X1.1-1.7B.Q4_K_M.gguf create mode 100644 FoxyzGPT-X1.1-1.7B.Q4_K_S.gguf create mode 100644 FoxyzGPT-X1.1-1.7B.Q5_K_M.gguf create mode 100644 FoxyzGPT-X1.1-1.7B.Q5_K_S.gguf create mode 100644 FoxyzGPT-X1.1-1.7B.Q6_K.gguf create mode 100644 FoxyzGPT-X1.1-1.7B.Q8_0.gguf create mode 100644 FoxyzGPT-X1.1-1.7B.f16.gguf create mode 100644 README.md diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..5869d40 --- /dev/null +++ b/.gitattributes @@ 
-0,0 +1,49 @@ +*.7z filter=lfs diff=lfs merge=lfs -text +*.arrow filter=lfs diff=lfs merge=lfs -text +*.bin filter=lfs diff=lfs merge=lfs -text +*.bz2 filter=lfs diff=lfs merge=lfs -text +*.ckpt filter=lfs diff=lfs merge=lfs -text +*.ftz filter=lfs diff=lfs merge=lfs -text +*.gz filter=lfs diff=lfs merge=lfs -text +*.h5 filter=lfs diff=lfs merge=lfs -text +*.joblib filter=lfs diff=lfs merge=lfs -text +*.lfs.* filter=lfs diff=lfs merge=lfs -text +*.mlmodel filter=lfs diff=lfs merge=lfs -text +*.model filter=lfs diff=lfs merge=lfs -text +*.msgpack filter=lfs diff=lfs merge=lfs -text +*.npy filter=lfs diff=lfs merge=lfs -text +*.npz filter=lfs diff=lfs merge=lfs -text +*.onnx filter=lfs diff=lfs merge=lfs -text +*.ot filter=lfs diff=lfs merge=lfs -text +*.parquet filter=lfs diff=lfs merge=lfs -text +*.pb filter=lfs diff=lfs merge=lfs -text +*.pickle filter=lfs diff=lfs merge=lfs -text +*.pkl filter=lfs diff=lfs merge=lfs -text +*.pt filter=lfs diff=lfs merge=lfs -text +*.pth filter=lfs diff=lfs merge=lfs -text +*.rar filter=lfs diff=lfs merge=lfs -text +*.safetensors filter=lfs diff=lfs merge=lfs -text +saved_model/**/* filter=lfs diff=lfs merge=lfs -text +*.tar.* filter=lfs diff=lfs merge=lfs -text +*.tar filter=lfs diff=lfs merge=lfs -text +*.tflite filter=lfs diff=lfs merge=lfs -text +*.tgz filter=lfs diff=lfs merge=lfs -text +*.wasm filter=lfs diff=lfs merge=lfs -text +*.xz filter=lfs diff=lfs merge=lfs -text +*.zip filter=lfs diff=lfs merge=lfs -text +*.zst filter=lfs diff=lfs merge=lfs -text +*tfevents* filter=lfs diff=lfs merge=lfs -text +FoxyzGPT-X1.1-1.7B.f16.gguf filter=lfs diff=lfs merge=lfs -text +FoxyzGPT-X1.1-1.7B.IQ3_M.gguf filter=lfs diff=lfs merge=lfs -text +FoxyzGPT-X1.1-1.7B.IQ3_S.gguf filter=lfs diff=lfs merge=lfs -text +FoxyzGPT-X1.1-1.7B.IQ4_XS.gguf filter=lfs diff=lfs merge=lfs -text +FoxyzGPT-X1.1-1.7B.Q2_K.gguf filter=lfs diff=lfs merge=lfs -text +FoxyzGPT-X1.1-1.7B.Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text 
+FoxyzGPT-X1.1-1.7B.Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text +FoxyzGPT-X1.1-1.7B.Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text +FoxyzGPT-X1.1-1.7B.Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text +FoxyzGPT-X1.1-1.7B.Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text +FoxyzGPT-X1.1-1.7B.Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text +FoxyzGPT-X1.1-1.7B.Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text +FoxyzGPT-X1.1-1.7B.Q6_K.gguf filter=lfs diff=lfs merge=lfs -text +FoxyzGPT-X1.1-1.7B.Q8_0.gguf filter=lfs diff=lfs merge=lfs -text diff --git a/FoxyzGPT-X1.1-1.7B.IQ3_M.gguf b/FoxyzGPT-X1.1-1.7B.IQ3_M.gguf new file mode 100644 index 0000000..e77103a --- /dev/null +++ b/FoxyzGPT-X1.1-1.7B.IQ3_M.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:20b72dd291a5704f08e04c8bef8ea0a0cf7257e51ac534034bbf7856bcf86716 +size 895661888 diff --git a/FoxyzGPT-X1.1-1.7B.IQ3_S.gguf b/FoxyzGPT-X1.1-1.7B.IQ3_S.gguf new file mode 100644 index 0000000..c5dc2fd --- /dev/null +++ b/FoxyzGPT-X1.1-1.7B.IQ3_S.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e9860df3f6c2e62d2f120e77a5bf4d5905712b74b37776b80316f8d84de9b800 +size 867252032 diff --git a/FoxyzGPT-X1.1-1.7B.IQ4_XS.gguf b/FoxyzGPT-X1.1-1.7B.IQ4_XS.gguf new file mode 100644 index 0000000..ba695aa --- /dev/null +++ b/FoxyzGPT-X1.1-1.7B.IQ4_XS.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f2a21817c4a1b3d43f0ef1f6674c92642e37bba3c2f352f0c22ab0fe209d74dc +size 1016280896 diff --git a/FoxyzGPT-X1.1-1.7B.Q2_K.gguf b/FoxyzGPT-X1.1-1.7B.Q2_K.gguf new file mode 100644 index 0000000..3b5ef48 --- /dev/null +++ b/FoxyzGPT-X1.1-1.7B.Q2_K.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c131c1415332c5102ce7a0e4f293bdf4d8449b1c06c1504b7ef7903698c8001a +size 777795392 diff --git a/FoxyzGPT-X1.1-1.7B.Q3_K_L.gguf b/FoxyzGPT-X1.1-1.7B.Q3_K_L.gguf new file mode 100644 index 0000000..e374e68 --- /dev/null +++ 
b/FoxyzGPT-X1.1-1.7B.Q3_K_L.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7c963359a17a6695e4bbb24e73927555730582ec379e67669c3e591ebd339b71 +size 1003501376 diff --git a/FoxyzGPT-X1.1-1.7B.Q3_K_M.gguf b/FoxyzGPT-X1.1-1.7B.Q3_K_M.gguf new file mode 100644 index 0000000..dfec343 --- /dev/null +++ b/FoxyzGPT-X1.1-1.7B.Q3_K_M.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:69342a1c3a69e0dedb0ab8a202237e665e90696322955871e60a823182ce2aa4 +size 939538240 diff --git a/FoxyzGPT-X1.1-1.7B.Q3_K_S.gguf b/FoxyzGPT-X1.1-1.7B.Q3_K_S.gguf new file mode 100644 index 0000000..ab60314 --- /dev/null +++ b/FoxyzGPT-X1.1-1.7B.Q3_K_S.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ed81bc67e19d0fb5f3ca59a11ee412b0a18b1956b6339a026f8278dceefe9065 +size 867252032 diff --git a/FoxyzGPT-X1.1-1.7B.Q4_K_M.gguf b/FoxyzGPT-X1.1-1.7B.Q4_K_M.gguf new file mode 100644 index 0000000..f93c2e0 --- /dev/null +++ b/FoxyzGPT-X1.1-1.7B.Q4_K_M.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:97eca496b70b455be77f2a6da6f689788502a53556eb04308507e20cbacf296f +size 1107408704 diff --git a/FoxyzGPT-X1.1-1.7B.Q4_K_S.gguf b/FoxyzGPT-X1.1-1.7B.Q4_K_S.gguf new file mode 100644 index 0000000..002ff55 --- /dev/null +++ b/FoxyzGPT-X1.1-1.7B.Q4_K_S.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d1cda7b30110dcc70ef1d4e238b3fabe1f2fc1213060bdc6069d212db5c89949 +size 1060190016 diff --git a/FoxyzGPT-X1.1-1.7B.Q5_K_M.gguf b/FoxyzGPT-X1.1-1.7B.Q5_K_M.gguf new file mode 100644 index 0000000..3ef18e9 --- /dev/null +++ b/FoxyzGPT-X1.1-1.7B.Q5_K_M.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fbed3d6ddec2d14ee7da3b3bf87ce6be35eff8e991b210ef409b093ebc2731cc +size 1257879360 diff --git a/FoxyzGPT-X1.1-1.7B.Q5_K_S.gguf b/FoxyzGPT-X1.1-1.7B.Q5_K_S.gguf new file mode 100644 index 0000000..9efef8d --- /dev/null +++ b/FoxyzGPT-X1.1-1.7B.Q5_K_S.gguf @@ -0,0 +1,3 
@@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c0249ef2563c7072fd75f293ce8fbfd569583ded364c11d3e7ac1e4635c1c66a +size 1230583616 diff --git a/FoxyzGPT-X1.1-1.7B.Q6_K.gguf b/FoxyzGPT-X1.1-1.7B.Q6_K.gguf new file mode 100644 index 0000000..496f03d --- /dev/null +++ b/FoxyzGPT-X1.1-1.7B.Q6_K.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f0f6f8f7ffff6724b06fd8eda338e555e04dd02ce06fc5da232304350bb836ae +size 1417754432 diff --git a/FoxyzGPT-X1.1-1.7B.Q8_0.gguf b/FoxyzGPT-X1.1-1.7B.Q8_0.gguf new file mode 100644 index 0000000..81b3f2f --- /dev/null +++ b/FoxyzGPT-X1.1-1.7B.Q8_0.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:710aa686541d1d30eb03055bcb007df98935615c5d669fb0d3ced79251abbb22 +size 1834426176 diff --git a/FoxyzGPT-X1.1-1.7B.f16.gguf b/FoxyzGPT-X1.1-1.7B.f16.gguf new file mode 100644 index 0000000..83602f0 --- /dev/null +++ b/FoxyzGPT-X1.1-1.7B.f16.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:55ef2e41d838c303ca5fc994e01357d2ecf27a2bc6b7c2daf3d0941600dbdaee +size 3447349056 diff --git a/README.md b/README.md new file mode 100644 index 0000000..c78d104 --- /dev/null +++ b/README.md @@ -0,0 +1,84 @@ +--- +license: cc-by-sa-4.0 +language: +- en +base_model: +- Qwen/Qwen3-1.7B +pipeline_tag: text-generation +--- + +# GGUF Files for FoxyzGPT-X1.1-1.7B + +These are the GGUF files for [Flexan/FoxyzGPT-X1.1-1.7B](https://huggingface.co/Flexan/FoxyzGPT-X1.1-1.7B). 
+ +## Downloads + +| GGUF Link | Quantization | Description | +| ---- | ----- | ----------- | +| [Download](https://huggingface.co/Flexan/Flexan-FoxyzGPT-X1.1-1.7B-GGUF/resolve/main/FoxyzGPT-X1.1-1.7B.Q2_K.gguf) | Q2_K | Lowest quality | +| [Download](https://huggingface.co/Flexan/Flexan-FoxyzGPT-X1.1-1.7B-GGUF/resolve/main/FoxyzGPT-X1.1-1.7B.Q3_K_S.gguf) | Q3_K_S | | +| [Download](https://huggingface.co/Flexan/Flexan-FoxyzGPT-X1.1-1.7B-GGUF/resolve/main/FoxyzGPT-X1.1-1.7B.IQ3_S.gguf) | IQ3_S | Integer quant, preferable over Q3_K_S | +| [Download](https://huggingface.co/Flexan/Flexan-FoxyzGPT-X1.1-1.7B-GGUF/resolve/main/FoxyzGPT-X1.1-1.7B.IQ3_M.gguf) | IQ3_M | Integer quant | +| [Download](https://huggingface.co/Flexan/Flexan-FoxyzGPT-X1.1-1.7B-GGUF/resolve/main/FoxyzGPT-X1.1-1.7B.Q3_K_M.gguf) | Q3_K_M | | +| [Download](https://huggingface.co/Flexan/Flexan-FoxyzGPT-X1.1-1.7B-GGUF/resolve/main/FoxyzGPT-X1.1-1.7B.Q3_K_L.gguf) | Q3_K_L | | +| [Download](https://huggingface.co/Flexan/Flexan-FoxyzGPT-X1.1-1.7B-GGUF/resolve/main/FoxyzGPT-X1.1-1.7B.IQ4_XS.gguf) | IQ4_XS | Integer quant | +| [Download](https://huggingface.co/Flexan/Flexan-FoxyzGPT-X1.1-1.7B-GGUF/resolve/main/FoxyzGPT-X1.1-1.7B.Q4_K_S.gguf) | Q4_K_S | Fast with good performance | +| [Download](https://huggingface.co/Flexan/Flexan-FoxyzGPT-X1.1-1.7B-GGUF/resolve/main/FoxyzGPT-X1.1-1.7B.Q4_K_M.gguf) | Q4_K_M | **Recommended:** Perfect mix of speed and performance | +| [Download](https://huggingface.co/Flexan/Flexan-FoxyzGPT-X1.1-1.7B-GGUF/resolve/main/FoxyzGPT-X1.1-1.7B.Q5_K_S.gguf) | Q5_K_S | | +| [Download](https://huggingface.co/Flexan/Flexan-FoxyzGPT-X1.1-1.7B-GGUF/resolve/main/FoxyzGPT-X1.1-1.7B.Q5_K_M.gguf) | Q5_K_M | | +| [Download](https://huggingface.co/Flexan/Flexan-FoxyzGPT-X1.1-1.7B-GGUF/resolve/main/FoxyzGPT-X1.1-1.7B.Q6_K.gguf) | Q6_K | Very good quality | +| [Download](https://huggingface.co/Flexan/Flexan-FoxyzGPT-X1.1-1.7B-GGUF/resolve/main/FoxyzGPT-X1.1-1.7B.Q8_0.gguf) | Q8_0 | Best quality | 
+| [Download](https://huggingface.co/Flexan/Flexan-FoxyzGPT-X1.1-1.7B-GGUF/resolve/main/FoxyzGPT-X1.1-1.7B.f16.gguf) | f16 | Full precision, don't bother; use a quant | + +# FoxyzGPT X1.1 1.7B + +## Description + +FoxyzGPT X1.1 1.7B is an instruct LLM consisting of 1.7B parameters trained to talk in a human conversational manner. It does not support reasoning or tool-calling (although the base model does). +The model was LoRA fine-tuned with [Qwen/Qwen3-1.7B](https://huggingface.co/Qwen/Qwen3-1.7B) as the base model. + +This model is trained on a private dataset provided by [Foxyz](https://huggingface.co/Foxyz). The model has adapted this persona and will not answer questions accurately. + +## Chat Format + +FoxyzGPT X1.1 1.7B uses the ChatML format, e.g.: +```text +<|im_start|>system +System message<|im_end|> +<|im_start|>user +User prompt<|im_end|> +<|im_start|>assistant +Assistant response<|im_end|> +``` + +## Usage + +This model is trained on one system prompt only. Therefore, it is recommended to use the one stated here: +```text +You are Foxyz (username: foxyz9248) and you are talking to `<username>` on Discord in a direct message. Use `~>` to signal the start of a new message (you can send multiple messages this way). Use silly language. +``` + +You should replace `<username>` with your username. You can obviously use this LLM for platforms other than Discord, but in the system prompt it's best to explicitly state the user is on Discord (the model has not been trained with other system prompt variations besides the username). + +The assistant response has the following format: +```text +<|im_start|>assistant +~> HLELOO!!! +~> howa reyou :D<|im_end|> +``` + +Note that the prompt should be formatted differently. The prompt is composed as a list of messages using the `~>` arrow notation. `~>` marks the start of a new message. 
This approach allows you to send multiple messages while still allowing multi-line messages, without using multiple `user` roles: +```text +<|im_start|>user +~> my first message +~> my second message +~> this is +a message +that has 4 +lines +~> and this is my fourth message<|im_end|> +``` + +## Datasets + +1. **Private dataset** — *3.1k chats* \ No newline at end of file