From 702f78bd947fc8dd9d1062a1a4cd7a9a70b76608 Mon Sep 17 00:00:00 2001
From: ModelHub XC
Date: Sat, 11 Apr 2026 09:58:57 +0800
Subject: [PATCH] Initialize project; model provided by the ModelHub XC community
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Model: Flexan/nopenet-nope-edge-GGUF-i3
Source: Original Platform
---
 .gitattributes        |  49 +++++
 README.md             | 445 ++++++++++++++++++++++++++++++++++++++++++
 nope-edge.IQ3_M.gguf  |   3 +
 nope-edge.IQ3_S.gguf  |   3 +
 nope-edge.IQ4_XS.gguf |   3 +
 nope-edge.Q2_K.gguf   |   3 +
 nope-edge.Q3_K_L.gguf |   3 +
 nope-edge.Q3_K_M.gguf |   3 +
 nope-edge.Q3_K_S.gguf |   3 +
 nope-edge.Q4_K_M.gguf |   3 +
 nope-edge.Q4_K_S.gguf |   3 +
 nope-edge.Q5_K_M.gguf |   3 +
 nope-edge.Q5_K_S.gguf |   3 +
 nope-edge.Q6_K.gguf   |   3 +
 nope-edge.Q8_0.gguf   |   3 +
 nope-edge.f16.gguf    |   3 +
 16 files changed, 536 insertions(+)
 create mode 100644 .gitattributes
 create mode 100644 README.md
 create mode 100644 nope-edge.IQ3_M.gguf
 create mode 100644 nope-edge.IQ3_S.gguf
 create mode 100644 nope-edge.IQ4_XS.gguf
 create mode 100644 nope-edge.Q2_K.gguf
 create mode 100644 nope-edge.Q3_K_L.gguf
 create mode 100644 nope-edge.Q3_K_M.gguf
 create mode 100644 nope-edge.Q3_K_S.gguf
 create mode 100644 nope-edge.Q4_K_M.gguf
 create mode 100644 nope-edge.Q4_K_S.gguf
 create mode 100644 nope-edge.Q5_K_M.gguf
 create mode 100644 nope-edge.Q5_K_S.gguf
 create mode 100644 nope-edge.Q6_K.gguf
 create mode 100644 nope-edge.Q8_0.gguf
 create mode 100644 nope-edge.f16.gguf

diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..d2fc3c0
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,49 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+nope-edge.f16.gguf filter=lfs diff=lfs merge=lfs -text
+nope-edge.IQ3_M.gguf filter=lfs diff=lfs merge=lfs -text
+nope-edge.IQ3_S.gguf filter=lfs diff=lfs merge=lfs -text
+nope-edge.IQ4_XS.gguf filter=lfs diff=lfs merge=lfs -text
+nope-edge.Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
+nope-edge.Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+nope-edge.Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+nope-edge.Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+nope-edge.Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+nope-edge.Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+nope-edge.Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+nope-edge.Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+nope-edge.Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
+nope-edge.Q8_0.gguf filter=lfs diff=lfs merge=lfs -text

diff --git a/README.md b/README.md
new file mode 100644
index 0000000..4946c82
--- /dev/null
+++ b/README.md
@@ -0,0 +1,445 @@
+---
+license: other
+license_name: nope-edge-community-license-v1.0
+license_link: LICENSE.md
+language:
+  - en
+tags:
+  - safety
+  - crisis-detection
+  - text-classification
+  - mental-health
+  - content-safety
+  - suicide-prevention
+base_model: nopenet/nope-edge
+pipeline_tag: text-generation
+library_name: transformers
+extra_gated_heading: "Access NOPE Edge"
+extra_gated_description: "This model is available for **research, academic, nonprofit, and evaluation use**. Commercial production use requires a separate license. Please read the [license terms below](#nope-edge-community-license-v10) before downloading."
+extra_gated_button_content: "Agree and download"
+extra_gated_fields:
+  I am using this for research, academic, nonprofit, personal, or evaluation purposes:
+    type: checkbox
+  I agree to the NOPE Edge Community License v1.0:
+    type: checkbox
+---
+
+# GGUF Files for nope-edge
+
+These are the GGUF files for [nopenet/nope-edge](https://huggingface.co/nopenet/nope-edge).
+
+> [!NOTE]
+> This is the **third iteration** of this model. A new iteration is published whenever the upstream model repo is updated with a new model, so this repo carries the latest version.
+>
+> [[first iteration (1)](https://huggingface.co/Flexan/nopenet-nope-edge-GGUF-i1)] [[second iteration (2)](https://huggingface.co/Flexan/nopenet-nope-edge-GGUF-i2)]
+
+## Downloads
+
+| GGUF Link | Quantization | Description |
+| ---- | ----- | ----------- |
+| [Download](https://huggingface.co/Flexan/nopenet-nope-edge-GGUF-i3/resolve/main/nope-edge.Q2_K.gguf) | Q2_K | Lowest quality |
+| [Download](https://huggingface.co/Flexan/nopenet-nope-edge-GGUF-i3/resolve/main/nope-edge.Q3_K_S.gguf) | Q3_K_S | |
+| [Download](https://huggingface.co/Flexan/nopenet-nope-edge-GGUF-i3/resolve/main/nope-edge.IQ3_S.gguf) | IQ3_S | Integer quant, preferable over Q3_K_S |
+| [Download](https://huggingface.co/Flexan/nopenet-nope-edge-GGUF-i3/resolve/main/nope-edge.IQ3_M.gguf) | IQ3_M | Integer quant |
+| [Download](https://huggingface.co/Flexan/nopenet-nope-edge-GGUF-i3/resolve/main/nope-edge.Q3_K_M.gguf) | Q3_K_M | |
+| [Download](https://huggingface.co/Flexan/nopenet-nope-edge-GGUF-i3/resolve/main/nope-edge.Q3_K_L.gguf) | Q3_K_L | |
+| [Download](https://huggingface.co/Flexan/nopenet-nope-edge-GGUF-i3/resolve/main/nope-edge.IQ4_XS.gguf) | IQ4_XS | Integer quant |
+| [Download](https://huggingface.co/Flexan/nopenet-nope-edge-GGUF-i3/resolve/main/nope-edge.Q4_K_S.gguf) | Q4_K_S | Fast with good performance |
+| [Download](https://huggingface.co/Flexan/nopenet-nope-edge-GGUF-i3/resolve/main/nope-edge.Q4_K_M.gguf) | Q4_K_M | **Recommended:** Good balance of speed and quality |
+| [Download](https://huggingface.co/Flexan/nopenet-nope-edge-GGUF-i3/resolve/main/nope-edge.Q5_K_S.gguf) | Q5_K_S | |
+| [Download](https://huggingface.co/Flexan/nopenet-nope-edge-GGUF-i3/resolve/main/nope-edge.Q5_K_M.gguf) | Q5_K_M | |
+| [Download](https://huggingface.co/Flexan/nopenet-nope-edge-GGUF-i3/resolve/main/nope-edge.Q6_K.gguf) | Q6_K | Very good quality |
+| [Download](https://huggingface.co/Flexan/nopenet-nope-edge-GGUF-i3/resolve/main/nope-edge.Q8_0.gguf) | Q8_0 | Best quality |
+| [Download](https://huggingface.co/Flexan/nopenet-nope-edge-GGUF-i3/resolve/main/nope-edge.f16.gguf) | f16 | Full precision, don't bother; use a quant |
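+
+### Running a quant locally
+
+The card itself doesn't document this, so as a hedged illustration of one common workflow: the sketch below fetches the recommended Q4_K_M quant with `huggingface-cli` and runs it with llama.cpp's `llama-cli`. The exact flags are assumptions based on recent llama.cpp builds - check `llama-cli --help` for yours.
+
+```bash
+# Assumes a working llama.cpp build is on PATH
+pip install -U "huggingface_hub[cli]"
+
+# Download a single quant instead of cloning the whole repo
+huggingface-cli download Flexan/nopenet-nope-edge-GGUF-i3 \
+  nope-edge.Q4_K_M.gguf --local-dir .
+
+# Chat mode (-cnv) applies the model's chat template; temperature 0 for deterministic output
+llama-cli -m nope-edge.Q4_K_M.gguf -cnv --temp 0 -n 300
+```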
+
+## Note from Flexan
+
+I provide GGUFs and quantizations of publicly available models that do not yet have a GGUF equivalent, usually models **I deem interesting and wish to try out**.
+
+If a quant you'd like is missing, you can request it in the community tab; requests to convert other public models are welcome there too. For questions about the model itself, please refer to [the original model repo](https://huggingface.co/nopenet/nope-edge).
+
+You can find more info about me and what I do [here](https://huggingface.co/Flexan/Flexan).
+
+# NOPE Edge - Crisis Classification Model
+
+A fine-tuned model for detecting crisis signals in text - suicidal ideation, self-harm, abuse, violence, and other safety-critical content. It produces chain-of-thought reasoning that explains each classification.
+
+> **License:** [NOPE Edge Community License v1.0](LICENSE.md) - Free for research, academic, nonprofit, and evaluation use. Commercial production use requires a separate license. See [nope.net/edge](https://nope.net/edge) for details.
+
+---
+
+## Model Variants
+
+| Model | Parameters | Use Case |
+|-------|------------|----------|
+| **[nope-edge](https://huggingface.co/nopenet/nope-edge)** | 4B | Maximum accuracy |
+| **[nope-edge-mini](https://huggingface.co/nopenet/nope-edge-mini)** | 1.7B | High-volume, cost-sensitive |
+
+This is **nope-edge (4B)**.
+
+---
+
+## Quick Start
+
+### Requirements
+
+- Python 3.10+
+- GPU with 8GB+ VRAM (e.g., RTX 3070, A10G, L4) - or CPU (slower)
+- ~8GB disk space
+
+```bash
+pip install torch transformers accelerate
+```
+
+### Usage
+
+```python
+from transformers import AutoModelForCausalLM, AutoTokenizer
+import torch
+
+model_id = "nopenet/nope-edge"
+
+tokenizer = AutoTokenizer.from_pretrained(model_id)
+model = AutoModelForCausalLM.from_pretrained(
+    model_id,
+    torch_dtype=torch.bfloat16,
+    device_map="auto"
+)
+
+def classify(message: str) -> str:
+    """Returns XML with reflection and risk classification."""
+    input_ids = tokenizer.apply_chat_template(
+        [{"role": "user", "content": message}],
+        tokenize=True,
+        return_tensors="pt",
+        add_generation_prompt=True
+    ).to(model.device)
+
+    with torch.no_grad():
+        output = model.generate(input_ids, max_new_tokens=300, do_sample=False)
+
+    return tokenizer.decode(
+        output[0][input_ids.shape[1]:],
+        skip_special_tokens=True
+    ).strip()
+
+# Example
+result = classify("I want to end it all tonight")
+print(result)
+```
+
+**Output:**
+```xml
+<reflection>The user directly expresses intent to end their life with a specific timeline ("tonight"), indicating acute suicidal ideation with imminent risk.</reflection>
+<risks>
+  <risk subject="self" type="suicide" severity="critical" imminence="acute"/>
+</risks>
+```
+
+---
+
+## Output Format
+
+The model outputs XML with two components:
+
+### 1. Reflection (Chain-of-Thought)
+
+```xml
+<reflection>Reasoning about the input...</reflection>
+```
+
+The model explains its classification, including:
+- What signals it detected
+- Why it chose the risk type and severity
+- Any contextual factors considered
+
+### 2. Risk Classification
+
+**Crisis detected:**
+```xml
+<risks>
+  <risk subject="self" type="suicide" severity="critical" imminence="acute"/>
+</risks>
+```
+
+**No crisis:**
+```xml
+<risks/>
+```
+
+### Risk Attributes
+
+| Attribute | Values | Description |
+|-----------|--------|-------------|
+| `subject` | `self`, `other` | Who is at risk |
+| `type` | `suicide`, `self_harm`, `self_neglect`, `violence`, `abuse`, `sexual_violence`, `exploitation`, `stalking`, `neglect` | Risk category |
+| `severity` | `mild`, `moderate`, `high`, `critical` | Urgency level |
+| `imminence` | `chronic`, `acute`, `urgent`, `emergency` | Time sensitivity |
+| `features` | comma-separated list | Specific indicators detected |
+
+### Subject Attribution
+
+| Subject | Meaning | Example |
+|---------|---------|---------|
+| `self` | The speaker is at risk | "I want to kill myself" |
+| `other` | Reporting concern about someone else | "My friend said she wants to die" |
+
+### Parsing Example
+
+```python
+import re
+from dataclasses import dataclass
+from typing import Optional
+
+@dataclass
+class Risk:
+    subject: str
+    type: str
+    severity: str
+    imminence: Optional[str] = None
+    features: Optional[list] = None
+
+def parse_output(output: str) -> dict:
+    """Parse model output into structured data."""
+    result = {
+        "reflection": None,
+        "risks": [],
+        "is_crisis": False
+    }
+
+    # Extract reflection
+    reflection_match = re.search(r'<reflection>(.*?)</reflection>', output, re.DOTALL)
+    if reflection_match:
+        result["reflection"] = reflection_match.group(1).strip()
+
+    # Check for empty risks (no crisis)
+    if '<risks/>' in output or '<risks></risks>' in output:
+        return result
+
+    # Extract risk elements
+    risk_pattern = r'<risk\s+([^>]+)/?\s*>'
+    for match in re.finditer(risk_pattern, output):
+        attrs = {}
+        for attr_match in re.finditer(r'(\w+)="([^"]*)"', match.group(1)):
+            attrs[attr_match.group(1)] = attr_match.group(2)
+
+        if attrs:
+            risk = Risk(
+                subject=attrs.get("subject", "self"),
+                type=attrs.get("type"),
+                severity=attrs.get("severity"),
+                imminence=attrs.get("imminence"),
+                features=attrs.get("features", "").split(",") if attrs.get("features") else None
+            )
+            result["risks"].append(risk)
+            result["is_crisis"] = True
+
+    return result
+
+# Usage
+output = classify("I want to end it all tonight")
+parsed = parse_output(output)
+print(f"Crisis: {parsed['is_crisis']}")
+print(f"Reasoning: {parsed['reflection']}")
+for risk in parsed['risks']:
+    print(f"Risk: {risk.type}/{risk.severity} ({risk.subject})")
+```
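+
+What you do with a parsed `Risk` is application policy, which the card leaves open. The sketch below is a deliberately simple, hypothetical triage routine - the severity threshold and queue names are illustrative assumptions, not guidance from NOPE - showing one way to route classifications while keeping a human in the loop, as the limitations section later requires.
+
+```python
+# Hypothetical triage policy built on parse_output() above.
+SEVERITY_ORDER = ["mild", "moderate", "high", "critical"]
+
+def triage(parsed: dict) -> str:
+    """Map parsed output to an (illustrative) review queue."""
+    if not parsed["is_crisis"]:
+        return "no_action"
+    # Rank each risk by severity; unknown values fall back to the lowest rank
+    worst = max(
+        SEVERITY_ORDER.index(r.severity) if r.severity in SEVERITY_ORDER else 0
+        for r in parsed["risks"]
+    )
+    # high/critical -> immediate human review; mild/moderate -> routine queue
+    if worst >= SEVERITY_ORDER.index("high"):
+        return "urgent_human_review"
+    return "routine_review"
+
+print(triage(parse_output(classify("I want to end it all tonight"))))
+```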
+
+---
+
+## Examples
+
+### Crisis Detection
+
+**Input:** "I want to end it all tonight"
+```xml
+<reflection>The user directly expresses intent to end their life with a specific timeline ("tonight"), indicating acute suicidal ideation with imminent risk.</reflection>
+<risks>
+  <risk subject="self" type="suicide" severity="critical" imminence="acute"/>
+</risks>
+```
+
+**Input:** "My friend told me she's been cutting herself"
+```xml
+<reflection>The user is reporting concern about a friend engaging in self-harm behavior. This is third-party disclosure requiring attention.</reflection>
+<risks>
+  <risk subject="other" type="self_harm" severity="moderate"/>
+</risks>
+```
+
+### No Crisis (Correctly Ignored)
+
+**Input:** "kms lmao this exam is killing me"
+```xml
+<reflection>The user is using hyperbolic internet slang ("kms" = "kill myself") to express frustration about an exam. The "lmao" and casual context indicate this is not genuine suicidal ideation.</reflection>
+<risks/>
+```
+
+**Input:** "I used to be suicidal but therapy helped me recover"
+```xml
+<reflection>The user is sharing a recovery narrative about past suicidal ideation. They explicitly state therapy helped and they have recovered. No current crisis indicators.</reflection>
+<risks/>
+```
+
+---
+
+## Input Best Practices
+
+### Text Preprocessing
+
+**Preserve natural prose.** The model was trained on real conversations with authentic expression:
+
+| Keep | Why |
+|------|-----|
+| Emojis | Emotional signals matter |
+| Punctuation intensity | "I can't do this!!!" vs "I can't do this" |
+| Slang/algospeak | "kms", "unalive", "catch the bus", "graped" |
+| Casual spelling | "im so done" - don't normalize |
+
+**Only remove:** Zero-width Unicode, decorative fonts, excessive whitespace.
+
+### Multi-Turn Conversations
+
+Serialize the history into a single user message:
+
+```python
+conversation = """User: How are you?
+Assistant: I'm here to help. How are you feeling?
+User: Not great. I've been thinking about ending it all."""
+
+messages = [{"role": "user", "content": conversation}]
+```
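+
+For longer histories, a small helper keeps the serialization consistent. `serialize_conversation` below is a hypothetical convenience function (not part of the model or any NOPE library), assuming the plain `User:`/`Assistant:` format shown above:
+
+```python
+def serialize_conversation(turns: list[tuple[str, str]]) -> str:
+    """Join (role, text) turns into the single-message format above."""
+    # Hypothetical helper; roles are expected to be "User" or "Assistant"
+    return "\n".join(f"{role}: {text}" for role, text in turns)
+
+conversation = serialize_conversation([
+    ("User", "How are you?"),
+    ("Assistant", "I'm here to help. How are you feeling?"),
+    ("User", "Not great. I've been thinking about ending it all."),
+])
+messages = [{"role": "user", "content": conversation}]
+```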
+
+---
+
+## Production Deployment
+
+For high-throughput use, deploy with vLLM or SGLang:
+
+```bash
+# SGLang (recommended)
+pip install sglang
+python -m sglang.launch_server \
+    --model-path nopenet/nope-edge \
+    --dtype bfloat16 --port 8000
+
+# vLLM
+pip install vllm
+python -m vllm.entrypoints.openai.api_server \
+    --model nopenet/nope-edge \
+    --dtype bfloat16 --max-model-len 2048 --port 8000
+```
+
+Then call it as an OpenAI-compatible API:
+
+```bash
+curl http://localhost:8000/v1/chat/completions \
+  -H "Content-Type: application/json" \
+  -d '{
+    "model": "nopenet/nope-edge",
+    "messages": [{"role": "user", "content": "I want to end it all"}],
+    "max_tokens": 300, "temperature": 0
+  }'
+```
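+
+The same call from Python, as a sketch assuming the official `openai` client (v1+); the API key is a placeholder since the local server doesn't check it:
+
+```python
+from openai import OpenAI
+
+# Point the client at the local OpenAI-compatible server started above
+client = OpenAI(base_url="http://localhost:8000/v1", api_key="not-needed")
+
+response = client.chat.completions.create(
+    model="nopenet/nope-edge",
+    messages=[{"role": "user", "content": "I want to end it all"}],
+    max_tokens=300,
+    temperature=0,
+)
+print(response.choices[0].message.content)
+```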
+
+---
+
+## Model Details
+
+| | |
+|---|---|
+| **Parameters** | 4B |
+| **Precision** | bfloat16 |
+| **Base Model** | Qwen/Qwen3-4B |
+| **Method** | LoRA fine-tune, merged to full weights |
+| **License** | [NOPE Edge Community License v1.0](LICENSE.md) |
+
+---
+
+## Risk Types Detected
+
+| Type | Description | Clinical Framework |
+|------|-------------|-------------------|
+| `suicide` | Suicidal ideation, intent, planning | C-SSRS |
+| `self_harm` | Non-suicidal self-injury (NSSI) | - |
+| `self_neglect` | Eating disorders, medical neglect | - |
+| `violence` | Threats/intent to harm others | HCR-20 |
+| `abuse` | Domestic/intimate partner violence | DASH |
+| `sexual_violence` | Rape, sexual assault, coercion | - |
+| `neglect` | Failing to care for a dependent | - |
+| `exploitation` | Trafficking, grooming, sextortion | - |
+| `stalking` | Persistent unwanted contact | SAM |
+
+---
+
+## Important Limitations
+
+- Outputs are **probabilistic signals**, not clinical assessments
+- **False negatives and false positives will occur**
+- Never use as the **sole basis** for intervention decisions
+- Always implement **human review** for flagged content
+- This model is **not** a medical device or a substitute for professional judgment
+- Not validated for all populations, languages, or cultural contexts
+
+---
+
+## Commercial Licensing
+
+This model is free for research, academic, nonprofit, and evaluation use.
+
+**For commercial production deployment**, contact us:
+- Email: support@nope.net
+- Website: https://nope.net/edge
+
+---
+
+## About NOPE
+
+NOPE provides safety infrastructure for AI applications. Our API helps developers detect mental health crises and harmful AI behavior in real time.
+
+- **Website:** https://nope.net
+- **Documentation:** https://docs.nope.net
+- **Support:** support@nope.net
+
+---
+
+## NOPE Edge Community License v1.0
+
+Copyright (c) 2026 NopeNet, LLC. All rights reserved.
+
+### Permitted Uses
+
+You may use this Model for:
+
+- **Research and academic purposes** - published or unpublished studies
+- **Personal projects** - non-commercial individual use
+- **Nonprofit organizations** - including crisis lines, mental health organizations, and safety-focused NGOs
+- **Evaluation and development** - testing integration before commercial licensing
+- **Benchmarking** - publishing evaluations with attribution
+
+### Commercial Use
+
+**Commercial use requires a separate license.** Commercial use includes production deployment in revenue-generating products and use by for-profit companies beyond evaluation.
+
+Contact support@nope.net or visit https://nope.net/edge for commercial licensing.
+
+### Restrictions
+
+You may NOT: redistribute or share weights; sublicense, sell, or transfer the Model; create derivative models for redistribution; or build a competing crisis classification product.
+
+### No Warranty
+
+THE MODEL IS PROVIDED "AS IS" WITHOUT WARRANTIES. False negatives and false positives will occur. This is not a medical device or a substitute for professional judgment.
+
+### Limitation of Liability
+
+NopeNet shall not be liable for damages arising from use, including classification errors or harm to any person.
+
+### Base Model
+
+Built on [Qwen3](https://huggingface.co/Qwen) by Alibaba Cloud (Apache 2.0). See NOTICE.md.
\ No newline at end of file
diff --git a/nope-edge.IQ3_M.gguf b/nope-edge.IQ3_M.gguf
new file mode 100644
index 0000000..c69c588
--- /dev/null
+++ b/nope-edge.IQ3_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:26cd5b90b52d303a303ec369a816d5ed937159618fc7c9c244787ef4de822334
+size 1962896288
diff --git a/nope-edge.IQ3_S.gguf b/nope-edge.IQ3_S.gguf
new file mode 100644
index 0000000..324614a
--- /dev/null
+++ b/nope-edge.IQ3_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6adb0cd75e431785d425fdbdffdb1c41cb1a528383edf27b8fc8e4a610fce750
+size 1899531168
diff --git a/nope-edge.IQ4_XS.gguf b/nope-edge.IQ4_XS.gguf
new file mode 100644
index 0000000..8d90946
--- /dev/null
+++ b/nope-edge.IQ4_XS.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f568f556b06e752c4867e895bd7c6cdb3a919167e43103f1296c891e34bd2d3e
+size 2286316448
diff --git a/nope-edge.Q2_K.gguf b/nope-edge.Q2_K.gguf
new file mode 100644
index 0000000..75298f4
--- /dev/null
+++ b/nope-edge.Q2_K.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aa8a8f65f7c9c140c8814e8af9d0d652f0a4d8bc203982697284c04d008464bb
+size 1669499808
diff --git a/nope-edge.Q3_K_L.gguf b/nope-edge.Q3_K_L.gguf
new file mode 100644
index 0000000..b14440e
--- /dev/null
+++ b/nope-edge.Q3_K_L.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5d9cda5108a6d2ba792cd51f1c9e3b6b76c52a9c7e5f991af74273d092d9ee2b
+size 2239785888
diff --git a/nope-edge.Q3_K_M.gguf b/nope-edge.Q3_K_M.gguf
new file mode 100644
index 0000000..7f27e5a
--- /dev/null
+++ b/nope-edge.Q3_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ddd8765cbf675de0759204737ecbf051113ce3784b24d7f20c724cb9b61de508
+size 2075618208
diff --git a/nope-edge.Q3_K_S.gguf b/nope-edge.Q3_K_S.gguf
new file mode 100644
index 0000000..d69e7ef
--- /dev/null
+++ b/nope-edge.Q3_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1099e114d1c6c83abb116316b25adfe5664f03169693ddc4b4d2e3cfda5baec2
+size 1886997408
diff --git a/nope-edge.Q4_K_M.gguf b/nope-edge.Q4_K_M.gguf
new file mode 100644
index 0000000..593ec4d
--- /dev/null
+++ b/nope-edge.Q4_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4423fb328db590099dd1a97db39bf848f5983f3706884427523a8db2afd604a6
+size 2497280928
diff --git a/nope-edge.Q4_K_S.gguf b/nope-edge.Q4_K_S.gguf
new file mode 100644
index 0000000..6780008
--- /dev/null
+++ b/nope-edge.Q4_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c9ad4336868f0ef14a016a574cc91f7bc3e324796a610fb435854bedb9990b69
+size 2383309728
diff --git a/nope-edge.Q5_K_M.gguf b/nope-edge.Q5_K_M.gguf
new file mode 100644
index 0000000..8af9df9
--- /dev/null
+++ b/nope-edge.Q5_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d00c9175b20928b51d7bb804f1c9ab2e7ede07e72c1ce91628fbb1da23539f85
+size 2889513888
diff --git a/nope-edge.Q5_K_S.gguf b/nope-edge.Q5_K_S.gguf
new file mode 100644
index 0000000..102e205
--- /dev/null
+++ b/nope-edge.Q5_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4ef0366f9bd3b0c715946c27ca25d2569d7b5a1e8c6c39016860f4c825b9306f
+size 2823711648
diff --git a/nope-edge.Q6_K.gguf b/nope-edge.Q6_K.gguf
new file mode 100644
index 0000000..57755e0
--- /dev/null
+++ b/nope-edge.Q6_K.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4c64d01eda29d6d6f96e9cbcbdd98ebab967841b6a7dd5ea53152305144c1e0c
+size 3306261408
diff --git a/nope-edge.Q8_0.gguf b/nope-edge.Q8_0.gguf
new file mode 100644
index 0000000..fdcaffe
--- /dev/null
+++ b/nope-edge.Q8_0.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4c43ffe873080db5a676fb6e1be2f8535d0ff832f0ee4ba0df2aeda1a92d64d5
+size 4280405408
diff --git a/nope-edge.f16.gguf b/nope-edge.f16.gguf
new file mode 100644
index 0000000..9d7fd26
--- /dev/null
+++ b/nope-edge.f16.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a237010c56a1c43a8e5de9d8ad8f31d973122dc6f6db5c92722bd5b21e623693
+size 8051285408