Super-squash history to reclaim storage
81 .gitattributes vendored Normal file
@@ -0,0 +1,81 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
f16/SWE-Dev-32B-f16-00001-of-00002.gguf filter=lfs diff=lfs merge=lfs -text
f16/SWE-Dev-32B-f16-00002-of-00002.gguf filter=lfs diff=lfs merge=lfs -text
SWE-Dev-32B-f16_q8_0.gguf filter=lfs diff=lfs merge=lfs -text
SWE-Dev-32B-bf16_q8_0.gguf filter=lfs diff=lfs merge=lfs -text
SWE-Dev-32B-f16_q6_k.gguf filter=lfs diff=lfs merge=lfs -text
SWE-Dev-32B-bf16_q6_k.gguf filter=lfs diff=lfs merge=lfs -text
SWE-Dev-32B-f16_q4_k.gguf filter=lfs diff=lfs merge=lfs -text
SWE-Dev-32B-bf16_q4_k.gguf filter=lfs diff=lfs merge=lfs -text
SWE-Dev-32B-q2_k_l.gguf filter=lfs diff=lfs merge=lfs -text
SWE-Dev-32B-q3_k_l.gguf filter=lfs diff=lfs merge=lfs -text
SWE-Dev-32B-q4_k_l.gguf filter=lfs diff=lfs merge=lfs -text
SWE-Dev-32B-q5_k_l.gguf filter=lfs diff=lfs merge=lfs -text
SWE-Dev-32B-q6_k_l.gguf filter=lfs diff=lfs merge=lfs -text
SWE-Dev-32B-q2_k_m.gguf filter=lfs diff=lfs merge=lfs -text
SWE-Dev-32B-q2_k_s.gguf filter=lfs diff=lfs merge=lfs -text
SWE-Dev-32B-q3_k_m.gguf filter=lfs diff=lfs merge=lfs -text
SWE-Dev-32B-q3_k_s.gguf filter=lfs diff=lfs merge=lfs -text
SWE-Dev-32B-q4_k_m.gguf filter=lfs diff=lfs merge=lfs -text
SWE-Dev-32B-q4_k_s.gguf filter=lfs diff=lfs merge=lfs -text
SWE-Dev-32B-q5_k_m.gguf filter=lfs diff=lfs merge=lfs -text
SWE-Dev-32B-q5_k_s.gguf filter=lfs diff=lfs merge=lfs -text
SWE-Dev-32B-q6_k_m.gguf filter=lfs diff=lfs merge=lfs -text
SWE-Dev-32B-q8_0.gguf filter=lfs diff=lfs merge=lfs -text
SWE-Dev-32B-q4_0.gguf filter=lfs diff=lfs merge=lfs -text
SWE-Dev-32B-q4_1.gguf filter=lfs diff=lfs merge=lfs -text
SWE-Dev-32B-q4_0_l.gguf filter=lfs diff=lfs merge=lfs -text
SWE-Dev-32B-q4_1_l.gguf filter=lfs diff=lfs merge=lfs -text
SWE-Dev-32B-q5_0.gguf filter=lfs diff=lfs merge=lfs -text
SWE-Dev-32B-q5_1.gguf filter=lfs diff=lfs merge=lfs -text
SWE-Dev-32B-q5_0_l.gguf filter=lfs diff=lfs merge=lfs -text
SWE-Dev-32B-q5_1_l.gguf filter=lfs diff=lfs merge=lfs -text
SWE-Dev-32B-iq1_s.gguf filter=lfs diff=lfs merge=lfs -text
SWE-Dev-32B-iq1_m.gguf filter=lfs diff=lfs merge=lfs -text
SWE-Dev-32B-iq2_xs.gguf filter=lfs diff=lfs merge=lfs -text
SWE-Dev-32B-iq2_xxs.gguf filter=lfs diff=lfs merge=lfs -text
SWE-Dev-32B-iq2_s.gguf filter=lfs diff=lfs merge=lfs -text
SWE-Dev-32B-iq2_m.gguf filter=lfs diff=lfs merge=lfs -text
SWE-Dev-32B-iq3_xs.gguf filter=lfs diff=lfs merge=lfs -text
SWE-Dev-32B-iq3_xxs.gguf filter=lfs diff=lfs merge=lfs -text
SWE-Dev-32B-iq3_s.gguf filter=lfs diff=lfs merge=lfs -text
SWE-Dev-32B-iq3_m.gguf filter=lfs diff=lfs merge=lfs -text
SWE-Dev-32B-iq4_xs.gguf filter=lfs diff=lfs merge=lfs -text
SWE-Dev-32B-iq4_nl.gguf filter=lfs diff=lfs merge=lfs -text
SWE-Dev-32B.imatrix filter=lfs diff=lfs merge=lfs -text
bf16/SWE-Dev-32B-bf16-00001-of-00002.gguf filter=lfs diff=lfs merge=lfs -text
bf16/SWE-Dev-32B-bf16-00002-of-00002.gguf filter=lfs diff=lfs merge=lfs -text
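Every pattern above routes matching files through Git LFS, so a plain `git clone` fetches only small pointer stubs rather than the multi-GB GGUF blobs. As a quick illustration, here is a hedged Python sketch for pulling a single quant without cloning the whole repository, assuming the `huggingface_hub` package is installed; the repo id below is hypothetical and should be replaced with the actual repository this commit belongs to:

```python
# Sketch: fetch one LFS-backed quant directly, skipping the other large siblings.
# Assumption: `pip install huggingface_hub`; the repo_id below is a placeholder.
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="your-namespace/SWE-Dev-32B-GGUF",  # hypothetical repo id
    filename="SWE-Dev-32B-q4_k_m.gguf",         # one of the quants tracked above
)
print(f"Downloaded to {path}")
```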
121 README.md Normal file
@@ -0,0 +1,121 @@
---
base_model:
- Qwen/Qwen2.5-Coder-32B-Instruct
library_name: transformers
license: mit
pipeline_tag: text-generation
---

# <span style="color: #7FFF7F;">SWE-Dev-32B GGUF Models</span>

## <span style="color: #7F7FFF;">Model Generation Details</span>

This model was generated using [llama.cpp](https://github.com/ggerganov/llama.cpp) at commit [`0a5a3b5c`](https://github.com/ggerganov/llama.cpp/commit/0a5a3b5cdfd887cf0f8e09d9ff89dee130cfcdde).
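If you want to reproduce a file like this yourself, the usual llama.cpp workflow is a two-step convert-then-quantize. A rough sketch follows; the paths are placeholders and the exact flags can vary between llama.cpp versions, so treat this as an outline rather than the exact commands used here:

```python
# Sketch: HF checkpoint -> f16 GGUF -> quantized GGUF with llama.cpp.
# Assumptions: a locally built llama.cpp checkout; all paths are placeholders.
import subprocess

# 1) Convert the Hugging Face checkpoint to a full-precision GGUF.
subprocess.run([
    "python", "llama.cpp/convert_hf_to_gguf.py", "SWE-Dev-32B",
    "--outtype", "f16",
    "--outfile", "SWE-Dev-32B-f16.gguf",
], check=True)

# 2) Quantize, optionally guided by an importance matrix
#    (this repo ships one as SWE-Dev-32B.imatrix).
subprocess.run([
    "llama.cpp/build/bin/llama-quantize",
    "--imatrix", "SWE-Dev-32B.imatrix",
    "SWE-Dev-32B-f16.gguf", "SWE-Dev-32B-q4_k_m.gguf", "Q4_K_M",
], check=True)
```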
---

## <span style="color: #7FFF7F;">Quantization Beyond the IMatrix</span>

I've been experimenting with a new quantization approach that selectively elevates the precision of key layers beyond what the default IMatrix configuration provides.

In my testing, standard IMatrix quantization underperforms at lower bit depths, especially with Mixture of Experts (MoE) models. To address this, I'm using the `--tensor-type` option in `llama.cpp` to manually "bump" important layers to higher precision. You can see the implementation here:
👉 [Layer bumping with llama.cpp](https://github.com/Mungert69/GGUFModelBuilder/blob/main/model-converter/tensor_list_builder.py)
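For a concrete picture of what a "bump" looks like, here is a hedged sketch of the quantize step with selected tensors overridden to a higher-precision type via the `--tensor-type` option mentioned above. The tensor names and the exact pattern syntax accepted may differ across llama.cpp builds, so verify against your build's `llama-quantize --help`:

```python
# Sketch: quantize to Q3_K_M overall, but bump attention-value and FFN down
# projections to Q6_K. Assumption: --tensor-type takes NAME=TYPE pairs in
# this build of llama.cpp; check your version before relying on this.
import subprocess

subprocess.run([
    "llama.cpp/build/bin/llama-quantize",
    "--imatrix", "SWE-Dev-32B.imatrix",
    "--tensor-type", "attn_v=q6_k",    # keep attention values sharper
    "--tensor-type", "ffn_down=q6_k",  # and the FFN down projection
    "SWE-Dev-32B-f16.gguf", "SWE-Dev-32B-q3_k_m.gguf", "Q3_K_M",
], check=True)
```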
While this does increase model file size, it significantly improves precision for a given quantization level.

### **I'd love your feedback—have you tried this? How does it perform for you?**

---

<a href="https://readyforquantum.com/huggingface_gguf_selection_guide.html" style="color: #7FFF7F;">
Click here to get info on choosing the right GGUF model format
</a>
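Whichever quant you settle on, loading it locally is straightforward. A minimal sketch using the `llama-cpp-python` bindings, one option among several; the file name and parameters are illustrative:

```python
# Sketch: run a quick prompt against a downloaded quant with llama-cpp-python.
# Assumptions: `pip install llama-cpp-python`; the .gguf path is a local file.
from llama_cpp import Llama

llm = Llama(
    model_path="SWE-Dev-32B-q4_k_m.gguf",
    n_ctx=4096,        # context window; raise it if you have the RAM
    n_gpu_layers=-1,   # offload all layers to GPU when one is available
)

out = llm.create_chat_completion(
    messages=[{"role": "user",
               "content": "Write a Python function that reverses a linked list."}]
)
print(out["choices"][0]["message"]["content"])
```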
---

<!--Begin Original Model Card-->

📝 [Paper](https://arxiv.org/abs/2506.07636) | 🌐 [Github](https://github.com/THUDM/SWE-Dev/)

- 🤗 [SWE-Dev-7B (Qwen-2.5-Coder-7B-Instruct)](https://huggingface.co/THUDM/SWE-Dev-7B/)
- 🤗 [SWE-Dev-9B (GLM-4-9B-Chat)](https://huggingface.co/THUDM/SWE-Dev-9B/)
- 🤗 [SWE-Dev-32B (Qwen-2.5-Coder-32B-Instruct)](https://huggingface.co/THUDM/SWE-Dev-32B/)
- 🤗 [SWE-Dev-train (Training Data)](https://huggingface.co/datasets/THUDM/SWE-Dev-train/)

🚀 SWE-Dev is an open-source agent for software engineering tasks! This repository contains the SWE-Dev-32B model presented in the paper [SWE-Dev: Building Software Engineering Agents with Training and Inference Scaling](https://huggingface.co/papers/2506.07636).

💡 We develop a comprehensive pipeline for creating developer-oriented datasets from GitHub repositories, including issue tracking, code localization, test case generation, and evaluation.

🔧 Built on open-source frameworks (OpenHands) and models, SWE-Dev-7B and SWE-Dev-32B achieve solve rates of 23.4% and 36.6% on SWE-bench-Verified, respectively, approaching the performance of GPT-4o.

📚 We find that both training-data scaling and inference scaling effectively boost model performance on SWE-bench, and that higher data quality further improves this trend when combined with reinforcement fine-tuning (RFT). For inference scaling specifically, the solve rate of SWE-Dev increased from 34.0% at 30 rounds to 36.6% at 75 rounds.

<!--End Original Model Card-->

---

# <span id="testllm" style="color: #7F7FFF;">🚀 If you find these models useful</span>

Help me test my **AI-Powered Quantum Network Monitor Assistant** with **quantum-ready security checks**:

👉 [Quantum Network Monitor](https://readyforquantum.com/?assistant=open&utm_source=huggingface&utm_medium=referral&utm_campaign=huggingface_repo_readme)

The full open-source code for the Quantum Network Monitor service is available in my GitHub repos (the ones with NetworkMonitor in the name): [Source Code Quantum Network Monitor](https://github.com/Mungert69). You will also find the code I use to quantize the models there, if you want to do it yourself: [GGUFModelBuilder](https://github.com/Mungert69/GGUFModelBuilder).

💬 **How to test**:
Choose an **AI assistant type**:
- `TurboLLM` (GPT-4.1-mini)
- `HugLLM` (Hugging Face open-source models)
- `TestLLM` (Experimental CPU-only)

### **What I'm Testing**
I'm pushing the limits of **small open-source models for AI network monitoring**, specifically:
- **Function calling** against live network services
- **How small can a model go** while still handling:
  - Automated **Nmap security scans**
  - **Quantum-readiness checks**
  - **Network monitoring tasks**

🟡 **TestLLM** – Current experimental model (llama.cpp on 2 CPU threads on a Hugging Face Docker space):
- ✅ **Zero-configuration setup**
- ⏳ 30s load time (slow inference but **no API costs**). No token limit, since the cost is low.
- 🔧 **Help wanted!** If you're into **edge-device AI**, let's collaborate!

### **Other Assistants**
🟢 **TurboLLM** – Uses **gpt-4.1-mini**:
- It performs very well, but unfortunately OpenAI charges per token, so token usage is limited.
- **Create custom cmd processors to run .NET code on Quantum Network Monitor Agents**
- **Real-time network diagnostics and monitoring**
- **Security audits**
- **Penetration testing** (Nmap/Metasploit)

🔵 **HugLLM** – Latest open-source models:
- 🌐 Runs on the Hugging Face Inference API. Performs pretty well using the latest models hosted on Novita.

### 💡 **Example commands you could test**:
1. `"Give me info on my website's SSL certificate"`
2. `"Check if my server is using quantum-safe encryption for communication"`
3. `"Run a comprehensive security audit on my server"`
4. `"Create a cmd processor to ... (whatever you want)"` Note: you need to install a [Quantum Network Monitor Agent](https://readyforquantum.com/Download/?utm_source=huggingface&utm_medium=referral&utm_campaign=huggingface_repo_readme) to run the .NET code on. This is a very flexible and powerful feature. Use with caution!

### Final Word

I fund the servers used to create these model files, run the Quantum Network Monitor service, and pay for inference from Novita and OpenAI—all out of my own pocket. All the code behind the model creation and the Quantum Network Monitor project is [open source](https://github.com/Mungert69). Feel free to use whatever you find helpful.

If you appreciate the work, please consider [buying me a coffee](https://www.buymeacoffee.com/mahadeva) ☕. Your support helps cover service costs and allows me to raise token limits for everyone.

I'm also open to job opportunities or sponsorship.

Thank you! 😊
3 SWE-Dev-32B-bf16_q8_0.gguf Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:32692e18ff299af2f678fd2ed18b64ff189b35903d22ea8afb3f8a9aa64a7117
size 46661601888

3 SWE-Dev-32B-f16_q8_0.gguf Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c10806227c3a8f965707c10c5d919d01845c9f08055885689c84a9e2746ad05d
size 46661601888

3 SWE-Dev-32B-iq1_m.gguf Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0fecf26aa92f4a1d361b3bc44a403dd9b29282b448e40fcaa9a2dd9f0b287b1e
size 9742306208

3 SWE-Dev-32B-iq1_s.gguf Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0266b3b05ef43db6e201924d9e1fd2c54ec5fb84dfde6902a03d0817939344f0
size 8992492448

3 SWE-Dev-32B-iq2_m.gguf Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:df8287ce37ae93f398584651d6c16d52d324eab8142e6ea74e3b080cf06b4421
size 11985849248

3 SWE-Dev-32B-iq2_s.gguf Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4b725e674efd3394cd6ff0828fd959702aab44c89a10b48afbe5e302e2cb65d2
size 11382262688

3 SWE-Dev-32B-iq2_xs.gguf Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:99ec87b12a839f2ac5376950fc415f161e3fe9c67c4d5b91d5489dbb0c4ebeb9
size 11016326048

3 SWE-Dev-32B-iq2_xxs.gguf Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e7d11619a2f5d5345dc11b4e18a10bbe73a7e4fc4c98caffb691cb703d1ee588
size 10124954528

3 SWE-Dev-32B-iq3_m.gguf Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f6c4681eb9f981fc6d388f049e71b55e92f23ea71e69eabd76d48837b8d53449
size 15545355168

3 SWE-Dev-32B-iq3_s.gguf Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:34b3ddaa06b41383dd58e80811f612a790b4f436881f494e5d32c136f54b1c7a
size 15545355168

3 SWE-Dev-32B-iq3_xs.gguf Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3a7ecd04b88b4e93d2a8be7e0202f410f31ad699848f2167eb020d9f6d9be776
size 13967002528

3 SWE-Dev-32B-iq3_xxs.gguf Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:94b8298b3b3ffbb1471d86ed8242e37652aadad692d879f33f6743b67112c482
size 13628836768

3 SWE-Dev-32B-iq4_nl.gguf Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:32afed051904c16b07b7c9659449ab78080f4bac92262d18974e93253281aee9
size 18682174368

3 SWE-Dev-32B-iq4_xs.gguf Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:35ff85f3cc07305922b605328e4f4ee231147c972d1be9491c1bfc4938d47ff6
size 17693154208

3 SWE-Dev-32B-q2_k_m.gguf Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:69178b9aa436c50d8acdd04367eae99b56ecc396d59dbede5eb6acacfe108982
size 12590295968

3 SWE-Dev-32B-q2_k_s.gguf Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e3bb375c9ca8e2de87ee8cd8cd0c6af1a22f4dee3d55f0395f3b1885ab7f6094
size 12383488928

3 SWE-Dev-32B-q3_k_m.gguf Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b9c82f0ff9f5e5cf1ecb1fd3aba0facd45d018165ea6c14cda8d89fa9c0c2e2d
size 16126127008

3 SWE-Dev-32B-q3_k_s.gguf Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1d5ad6c739d75dd706c4da4a7c5c4aa8aa06b303ff5852ab48cf21936bebf021
size 15919319968

3 SWE-Dev-32B-q4_0.gguf Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:25a91b39798392e29216f85a47db74d3546c1dab1acb18a7c9c59fad949bced9
size 18439506848

3 SWE-Dev-32B-q4_1.gguf Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4beddef8347978ffa417ee7c4180a258ad3150a802f422edbb6c6e9bd48f9078
size 20487179168

3 SWE-Dev-32B-q4_k_m.gguf Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4ebf59bd3c46c1a03fe18340fd6c06d95e01d7671f5a209bce97832aec45fb65
size 19824978848

3 SWE-Dev-32B-q4_k_s.gguf Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:db62de42a9f85359bfc0c6cf9f9909707f9e4e86ecb8c24cdff81e5e06ddbbf2
size 19161426848

3 SWE-Dev-32B-q5_0.gguf Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ca250ed769559f6773628961cfd955ad45113ba1b86878c9a1842c428a69e3de
size 22534851488

3 SWE-Dev-32B-q5_1.gguf Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f1349abd83682dbefac879e382378ad61e40fd65cd2d8c8738f8c9bf452bb36f
size 24582523808

3 SWE-Dev-32B-q5_k_m.gguf Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cfb334089481ec472986e0fbf9e28ca43e95503cea6839ec2c20a83df9894484
size 23414303648

3 SWE-Dev-32B-q5_k_s.gguf Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2342aeae5042714ea12ffa0b607042e4b3a738f7f05fc1492b612a25e4a9437b
size 23063358368

3 SWE-Dev-32B-q6_k_m.gguf Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2623e42a6949ea1949be633c4732a9dd505bc69919cd28a4532d462f188aa80d
size 26886155168

3 SWE-Dev-32B-q8_0.gguf Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dbefb397865aa86f29d7b0c352e4df1211abe5a8bc87c17a10d5f60570d1d55b
size 34820885088

3 SWE-Dev-32B.imatrix Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b04c9a698920b9f32eab255d2c7e60a6cc1a0a2faa289fce29dbd22ec28da78a
size 14957132

3 bf16/SWE-Dev-32B-bf16-00001-of-00002.gguf Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c4d21e098d32c4e91334bd0b525ec6a5dd262b92c1de21df6c97156fc838a4d4
size 45902462976

3 bf16/SWE-Dev-32B-bf16-00002-of-00002.gguf Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e28cf065e3feda607abb4d70237e407bed95f9b5e86f1f6b3ca90f7486ed9884
size 19633506912

3 f16/SWE-Dev-32B-f16-00001-of-00002.gguf Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b5e235d03a4780ff1b7dd759884857abd30a04ae44692e4fa2fd000ba0b81ed5
size 45902462976

3 f16/SWE-Dev-32B-f16-00002-of-00002.gguf Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:38c2baf26f9d06c7e70083870d12b27052410296911d241c1f1ee994403c0748
size 19633506912
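Each of the entries above is a Git LFS pointer: the repository records only the spec version, a sha256 object id, and the byte size, while the blob itself lives in LFS storage. That makes integrity checks easy after a download. A small stdlib-only sketch, using the oid and size from the SWE-Dev-32B-bf16_q8_0.gguf pointer above:

```python
# Sketch: verify a downloaded GGUF against the oid/size from its LFS pointer.
import hashlib
from pathlib import Path

def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    """Stream-hash the file and compare against the pointer's sha256 oid and size."""
    p = Path(path)
    if p.stat().st_size != expected_size:
        return False
    h = hashlib.sha256()
    with p.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
            h.update(chunk)
    return h.hexdigest() == expected_oid

# Values copied from the SWE-Dev-32B-bf16_q8_0.gguf pointer:
ok = verify_lfs_object(
    "SWE-Dev-32B-bf16_q8_0.gguf",
    "32692e18ff299af2f678fd2ed18b64ff189b35903d22ea8afb3f8a9aa64a7117",
    46661601888,
)
print("pointer match" if ok else "mismatch")
```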