From 2c8731ccb18486fbbf4dfd4be48d7dd8e7778c5f Mon Sep 17 00:00:00 2001 From: ModelHub XC Date: Fri, 10 Apr 2026 18:27:06 +0800 Subject: [PATCH] =?UTF-8?q?=E5=88=9D=E5=A7=8B=E5=8C=96=E9=A1=B9=E7=9B=AE?= =?UTF-8?q?=EF=BC=8C=E7=94=B1ModelHub=20XC=E7=A4=BE=E5=8C=BA=E6=8F=90?= =?UTF-8?q?=E4=BE=9B=E6=A8=A1=E5=9E=8B?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Model: gss1147/Hunyuan-PythonGOD-0.5B-GGUF Source: Original Platform --- .gitattributes | 38 +++++++++++++ Hunyuan-PythonGOD-0.5B.Q4_K_M.gguf | 3 + Hunyuan-PythonGOD-0.5B.Q5_K_M.gguf | 3 + Hunyuan-PythonGOD-0.5B.f16.gguf | 3 + README.md | 90 ++++++++++++++++++++++++++++++ 5 files changed, 137 insertions(+) create mode 100644 .gitattributes create mode 100644 Hunyuan-PythonGOD-0.5B.Q4_K_M.gguf create mode 100644 Hunyuan-PythonGOD-0.5B.Q5_K_M.gguf create mode 100644 Hunyuan-PythonGOD-0.5B.f16.gguf create mode 100644 README.md diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..558c0eb --- /dev/null +++ b/.gitattributes @@ -0,0 +1,38 @@ +*.7z filter=lfs diff=lfs merge=lfs -text +*.arrow filter=lfs diff=lfs merge=lfs -text +*.bin filter=lfs diff=lfs merge=lfs -text +*.bz2 filter=lfs diff=lfs merge=lfs -text +*.ckpt filter=lfs diff=lfs merge=lfs -text +*.ftz filter=lfs diff=lfs merge=lfs -text +*.gz filter=lfs diff=lfs merge=lfs -text +*.h5 filter=lfs diff=lfs merge=lfs -text +*.joblib filter=lfs diff=lfs merge=lfs -text +*.lfs.* filter=lfs diff=lfs merge=lfs -text +*.mlmodel filter=lfs diff=lfs merge=lfs -text +*.model filter=lfs diff=lfs merge=lfs -text +*.msgpack filter=lfs diff=lfs merge=lfs -text +*.npy filter=lfs diff=lfs merge=lfs -text +*.npz filter=lfs diff=lfs merge=lfs -text +*.onnx filter=lfs diff=lfs merge=lfs -text +*.ot filter=lfs diff=lfs merge=lfs -text +*.parquet filter=lfs diff=lfs merge=lfs -text +*.pb filter=lfs diff=lfs merge=lfs -text +*.pickle filter=lfs diff=lfs merge=lfs -text +*.pkl filter=lfs diff=lfs merge=lfs -text 
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+Hunyuan-PythonGOD-0.5B.f16.gguf filter=lfs diff=lfs merge=lfs -text
+Hunyuan-PythonGOD-0.5B.Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+Hunyuan-PythonGOD-0.5B.Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
diff --git a/Hunyuan-PythonGOD-0.5B.Q4_K_M.gguf b/Hunyuan-PythonGOD-0.5B.Q4_K_M.gguf
new file mode 100644
index 0000000..a631ca8
--- /dev/null
+++ b/Hunyuan-PythonGOD-0.5B.Q4_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:15e7298f834600399fe9b50412e7c3574c807bcbe34ae801f786f84e4e33ce2a
+size 354971808
diff --git a/Hunyuan-PythonGOD-0.5B.Q5_K_M.gguf b/Hunyuan-PythonGOD-0.5B.Q5_K_M.gguf
new file mode 100644
index 0000000..7b44e3b
--- /dev/null
+++ b/Hunyuan-PythonGOD-0.5B.Q5_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d56e68e96cf7608b2944a6d30ccbfb97311b9e8a6c0e7255ac7ec1643fe7c809
+size 399798432
diff --git a/Hunyuan-PythonGOD-0.5B.f16.gguf b/Hunyuan-PythonGOD-0.5B.f16.gguf
new file mode 100644
index 0000000..96f1140
--- /dev/null
+++ b/Hunyuan-PythonGOD-0.5B.f16.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e947176e3471dbab56000434df4435e0db042f107e56d93502fbda3c66d2b4e9
+size 1083223680
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..31ad4d5
--- /dev/null
+++ b/README.md
@@ -0,0 +1,91 @@
+---
+language:
+ - en
+license: other
+library_name: transformers
+pipeline_tag: text-generation
+tags:
+ - gguf
+ - hunyuan
+ - python
+ - code-generation
+ - code-assistant
+ - instruct
+ - conversational
+ - causal-lm
+ - full-finetune
+base_model:
+ - tencent/Hunyuan-0.5B-Instruct
+datasets:
+ - WithinUsAI/Python_GOD_Coder_Omniforge_AI_12k
+ - WithinUsAI/Python_GOD_Coder_5k
+ - WithinUsAI/Legend_Python_CoderV.1
+model-index:
+ - name: Hunyuan-PythonGOD-0.5B-GGUF
+   results: []
+---
+
+# Hunyuan-PythonGOD-0.5B-GGUF
+
+**Hunyuan-PythonGOD-0.5B-GGUF** is a compact Python-specialized coding model released in GGUF format for lightweight local inference. It is derived from a full fine-tune of `tencent/Hunyuan-0.5B-Instruct` and is aimed at code generation, Python scripting, debugging help, implementation tasks, and coding-oriented chat workflows.
+
+This repo provides quantized GGUF builds for efficient use with llama.cpp-compatible runtimes and other GGUF-serving backends.
+
+## Model Details
+
+### Base Model
+- **Base model:** `tencent/Hunyuan-0.5B-Instruct`
+- **Architecture:** Causal decoder-only language model
+- **Parameter scale:** ~0.5B
+- **Specialization:** Python coding and general code-assistant behavior
+- **Release format:** GGUF
+
+### Included Files
+- `Hunyuan-PythonGOD-0.5B.Q4_K_M.gguf`
+- `Hunyuan-PythonGOD-0.5B.Q5_K_M.gguf`
+- `Hunyuan-PythonGOD-0.5B.f16.gguf`
+
+## Training Summary
+
+This GGUF release is based on a **full fine-tune**, not an adapter-only export.
+
+### Training Datasets
+- `WithinUsAI/Python_GOD_Coder_Omniforge_AI_12k`
+- `WithinUsAI/Python_GOD_Coder_5k`
+- `WithinUsAI/Legend_Python_CoderV.1`
+
+### Training Characteristics
+- Full-parameter fine-tuning
+- Python/code-oriented instruction tuning
+- Exported as standard model weights before GGUF conversion
+- Intended for compact coding assistance and local inference experimentation
+
+## Intended Uses
+
+### Good Fits
+- Python function generation
+- Python script writing
+- Debugging assistance
+- Automation script drafting
+- Code-oriented local assistants
+- Small-model coding experiments
+
+### Not Intended For
+- Safety-critical software deployment without review
+- Autonomous execution without sandboxing
+- Guaranteed bug-free or secure code generation
+- Medical, legal, or financial decision support
+
+## Quantization Notes
+
+This repo includes multiple tradeoff points:
+
+- **Q4_K_M**: smaller footprint, faster/lighter inference
+- **Q5_K_M**: stronger quality-to-size balance
+- **F16**: highest fidelity in this repo, larger memory cost
+
+## Example llama.cpp Usage
+
+```bash
+./llama-cli -m Hunyuan-PythonGOD-0.5B.Q5_K_M.gguf -p "Write a Python function that validates an email address." -n 256
+```
\ No newline at end of file