commit dfdeccf0cae2f7dbf00b8856ae8a34dc97539d81 Author: ModelHub XC Date: Tue May 5 23:39:01 2026 +0800 初始化项目,由ModelHub XC社区提供模型 Model: CelesteImperia/Qwen2.5-Coder-7B-Instruct-Platinum-GGUF Source: Original Platform diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..7ddd8dd --- /dev/null +++ b/.gitattributes @@ -0,0 +1,39 @@ +*.7z filter=lfs diff=lfs merge=lfs -text +*.arrow filter=lfs diff=lfs merge=lfs -text +*.bin filter=lfs diff=lfs merge=lfs -text +*.bz2 filter=lfs diff=lfs merge=lfs -text +*.ckpt filter=lfs diff=lfs merge=lfs -text +*.ftz filter=lfs diff=lfs merge=lfs -text +*.gz filter=lfs diff=lfs merge=lfs -text +*.h5 filter=lfs diff=lfs merge=lfs -text +*.joblib filter=lfs diff=lfs merge=lfs -text +*.lfs.* filter=lfs diff=lfs merge=lfs -text +*.mlmodel filter=lfs diff=lfs merge=lfs -text +*.model filter=lfs diff=lfs merge=lfs -text +*.msgpack filter=lfs diff=lfs merge=lfs -text +*.npy filter=lfs diff=lfs merge=lfs -text +*.npz filter=lfs diff=lfs merge=lfs -text +*.onnx filter=lfs diff=lfs merge=lfs -text +*.ot filter=lfs diff=lfs merge=lfs -text +*.parquet filter=lfs diff=lfs merge=lfs -text +*.pb filter=lfs diff=lfs merge=lfs -text +*.pickle filter=lfs diff=lfs merge=lfs -text +*.pkl filter=lfs diff=lfs merge=lfs -text +*.pt filter=lfs diff=lfs merge=lfs -text +*.pth filter=lfs diff=lfs merge=lfs -text +*.rar filter=lfs diff=lfs merge=lfs -text +*.safetensors filter=lfs diff=lfs merge=lfs -text +saved_model/**/* filter=lfs diff=lfs merge=lfs -text +*.tar.* filter=lfs diff=lfs merge=lfs -text +*.tar filter=lfs diff=lfs merge=lfs -text +*.tflite filter=lfs diff=lfs merge=lfs -text +*.tgz filter=lfs diff=lfs merge=lfs -text +*.wasm filter=lfs diff=lfs merge=lfs -text +*.xz filter=lfs diff=lfs merge=lfs -text +*.zip filter=lfs diff=lfs merge=lfs -text +*.zst filter=lfs diff=lfs merge=lfs -text +*tfevents* filter=lfs diff=lfs merge=lfs -text +Qwen2.5-Coder-7B-Instruct-Platinum-Q6_K.gguf filter=lfs diff=lfs merge=lfs -text 
+Qwen2.5-Coder-7B-Instruct-Platinum-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text +Qwen2.5-Coder-7B-Instruct-Platinum-F16.gguf filter=lfs diff=lfs merge=lfs -text +Qwen2.5-Coder-7B-Instruct-Platinum-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..1548cc3 --- /dev/null +++ b/LICENSE @@ -0,0 +1,17 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + ... [Full Apache 2.0 Text omitted for brevity but should be the standard 2004 version] + + Copyright 2024 Alibaba Cloud (Qwen Team) + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 \ No newline at end of file diff --git a/Qwen2.5-Coder-7B-Instruct-Platinum-F16.gguf b/Qwen2.5-Coder-7B-Instruct-Platinum-F16.gguf new file mode 100644 index 0000000..46d53c1 --- /dev/null +++ b/Qwen2.5-Coder-7B-Instruct-Platinum-F16.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:274c0eb05fe4a712805d4b999ad8e419f14a908ef1ca1b310a737a418ba15452 +size 15237853792 diff --git a/Qwen2.5-Coder-7B-Instruct-Platinum-Q4_K_M.gguf b/Qwen2.5-Coder-7B-Instruct-Platinum-Q4_K_M.gguf new file mode 100644 index 0000000..0e3bd54 --- /dev/null +++ b/Qwen2.5-Coder-7B-Instruct-Platinum-Q4_K_M.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:46caa6175bbef0b12ffe552f702fb3949cb72c11acc29fdc935e10640b57f5a9 +size 4683074144 diff --git a/Qwen2.5-Coder-7B-Instruct-Platinum-Q5_K_M.gguf b/Qwen2.5-Coder-7B-Instruct-Platinum-Q5_K_M.gguf new file mode 100644 index 0000000..4995738 --- /dev/null +++ b/Qwen2.5-Coder-7B-Instruct-Platinum-Q5_K_M.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:38d6bd18220d7ae5251470a35eb57f469748b5bbf210eb0048f1e0365229a1e7 +size 5444831840 diff --git a/Qwen2.5-Coder-7B-Instruct-Platinum-Q6_K.gguf b/Qwen2.5-Coder-7B-Instruct-Platinum-Q6_K.gguf new file mode 100644 index 0000000..b35a46d --- /dev/null +++ b/Qwen2.5-Coder-7B-Instruct-Platinum-Q6_K.gguf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:86b8e170136541e170f5c08b4fe5038c367c90922f3444685e0ed4c3bf61b9ca +size 6254199392 diff --git a/README.md b/README.md new file mode 100644 index 0000000..2512df0 --- /dev/null +++ b/README.md @@ -0,0 +1,96 @@ +--- +base_model: Qwen/Qwen2.5-Coder-7B-Instruct +library_name: gguf +pipeline_tag: text-generation +license: apache-2.0 +tags: +- gguf +- llama-cpp +- qwen2.5-coder +- celeste-imperia +--- + +# Qwen2.5-Coder-7B-Instruct-GGUF (Platinum Series) + 
+![Status](https://img.shields.io/badge/Status-Active-success) +![Format](https://img.shields.io/badge/Format-GGUF-green) +![Series](https://img.shields.io/badge/Series-Platinum-silver) +[![Support](https://img.shields.io/badge/Support-Razorpay-orange)](https://razorpay.me/@huggingface) + +This repository contains the **Platinum Series** universal GGUF release of **Qwen2.5-Coder-7B-Instruct**. This collection provides multiple quantization levels optimized for cross-platform performance, specializing in high-precision code generation and technical reasoning. + +## 📦 Available Files & Quantization Details + +| File Name | Quantization | Size | Accuracy | Recommended For | +| :--- | :--- | :--- | :--- | :--- | +| **Qwen2.5-Coder-7B-Instruct-Platinum-F16.gguf** | FP16 | ~15.2 GB | 100% | Master Reference / Benchmarking | +| **Qwen2.5-Coder-7B-Instruct-Platinum-Q8_0.gguf** *(not included in this upload)* | Q8_0 | ~8.0 GB | 99.9% | Platinum Reference / High-Fidelity | +| **Qwen2.5-Coder-7B-Instruct-Platinum-Q6_K.gguf** | Q6_K | ~6.3 GB | 99.8% | High-Quality Coding Assistant | +| **Qwen2.5-Coder-7B-Instruct-Platinum-Q5_K_M.gguf** | Q5_K_M | ~5.5 GB | 99.5% | Balanced Desktop Performance | +| **Qwen2.5-Coder-7B-Instruct-Platinum-Q4_K_M.gguf** | Q4_K_M | ~4.7 GB | 99.0% | Efficiency / Mid-Range Hardware | + +--- + +## 🐍 Python Inference (llama-cpp-python) + +To run these engines using Python: + +```python +from llama_cpp import Llama + +llm = Llama( + model_path="Qwen2.5-Coder-7B-Instruct-Platinum-Q6_K.gguf", + n_gpu_layers=-1, # Target all layers to NVIDIA/Apple GPU + n_ctx=8192 # High context for coding tasks +) + +output = llm("Write a C# class that implements a thread-safe singleton pattern.", max_tokens=300) +print(output["choices"][0]["text"]) +``` + +--- + +## 💻 For C# / .NET Users (LLamaSharp) + +This collection is fully compatible with .NET applications via the ``LLamaSharp`` library. 
+ +```csharp +using LLama.Common; +using LLama; + +var parameters = new ModelParams("Qwen2.5-Coder-7B-Instruct-Platinum-Q6_K.gguf") { + ContextSize = 8192, + GpuLayerCount = 35 +}; + +using var model = LLamaWeights.LoadFromFile(parameters); +using var context = model.CreateContext(parameters); +var executor = new InteractiveExecutor(context); + +Console.WriteLine("Coding Specialist Active."); +``` + +--- + +## 🏗️ Technical Details +- **Optimization Tool:** llama.cpp (CUDA-accelerated) +- **Architecture:** Qwen-2.5-Coder (7B) +- **Hardware Validation:** Dual-GPU (RTX 3090 + RTX A4000) + +--- + +### ☕ Support the Forge + +Maintaining high-capacity workstations for model conversion requires hardware investment. If these tools power your production software, please consider supporting the development: + +| Platform | Support Link | +| :--- | :--- | +| **Global & India** | [Support via Razorpay](https://razorpay.me/@huggingface) | + +**Scan to support via UPI (India Only):** + + + +--- + +**Connect with the architect:** [Abhishek Jaiswal on LinkedIn](https://www.linkedin.com/in/abhishek-jaiswal-524056a/) \ No newline at end of file diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..86c375f --- /dev/null +++ b/requirements.txt @@ -0,0 +1,5 @@ +llama-cpp-python +optimum-intel[openvino,nncf]>=1.20.0 +transformers>=4.45.0 +accelerate +sentencepiece