From cc61ef51198a08f0d44446f0823155ec328d6b79 Mon Sep 17 00:00:00 2001
From: ModelHub XC
Date: Sat, 11 Apr 2026 10:01:04 +0800
Subject: [PATCH] Initialize project; model provided by the ModelHub XC
 community
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Model: Flexan/nopenet-nope-edge-mini-GGUF-i1
Source: Original Platform
---
 .gitattributes             |  50 +++++
 README.md                  | 433 +++++++++++++++++++++++++++++++++++++
 nope-edge-mini.IQ3_M.gguf  |   3 +
 nope-edge-mini.IQ3_S.gguf  |   3 +
 nope-edge-mini.IQ3_XS.gguf |   3 +
 nope-edge-mini.IQ4_XS.gguf |   3 +
 nope-edge-mini.Q2_K.gguf   |   3 +
 nope-edge-mini.Q3_K_L.gguf |   3 +
 nope-edge-mini.Q3_K_M.gguf |   3 +
 nope-edge-mini.Q3_K_S.gguf |   3 +
 nope-edge-mini.Q4_K_M.gguf |   3 +
 nope-edge-mini.Q4_K_S.gguf |   3 +
 nope-edge-mini.Q5_K_M.gguf |   3 +
 nope-edge-mini.Q5_K_S.gguf |   3 +
 nope-edge-mini.Q6_K.gguf   |   3 +
 nope-edge-mini.Q8_0.gguf   |   3 +
 nope-edge-mini.f16.gguf    |   3 +
 17 files changed, 528 insertions(+)
 create mode 100644 .gitattributes
 create mode 100644 README.md
 create mode 100644 nope-edge-mini.IQ3_M.gguf
 create mode 100644 nope-edge-mini.IQ3_S.gguf
 create mode 100644 nope-edge-mini.IQ3_XS.gguf
 create mode 100644 nope-edge-mini.IQ4_XS.gguf
 create mode 100644 nope-edge-mini.Q2_K.gguf
 create mode 100644 nope-edge-mini.Q3_K_L.gguf
 create mode 100644 nope-edge-mini.Q3_K_M.gguf
 create mode 100644 nope-edge-mini.Q3_K_S.gguf
 create mode 100644 nope-edge-mini.Q4_K_M.gguf
 create mode 100644 nope-edge-mini.Q4_K_S.gguf
 create mode 100644 nope-edge-mini.Q5_K_M.gguf
 create mode 100644 nope-edge-mini.Q5_K_S.gguf
 create mode 100644 nope-edge-mini.Q6_K.gguf
 create mode 100644 nope-edge-mini.Q8_0.gguf
 create mode 100644 nope-edge-mini.f16.gguf

diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..f5e26a6
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,50 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+nope-edge-mini.f16.gguf filter=lfs diff=lfs merge=lfs -text
+nope-edge-mini.Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
+nope-edge-mini.Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+nope-edge-mini.Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
+nope-edge-mini.IQ3_M.gguf filter=lfs diff=lfs merge=lfs -text
+nope-edge-mini.IQ3_S.gguf filter=lfs diff=lfs merge=lfs -text
+nope-edge-mini.IQ3_XS.gguf filter=lfs diff=lfs merge=lfs -text
+nope-edge-mini.IQ4_XS.gguf filter=lfs diff=lfs merge=lfs -text
+nope-edge-mini.Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+nope-edge-mini.Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+nope-edge-mini.Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+nope-edge-mini.Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+nope-edge-mini.Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+nope-edge-mini.Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+nope-edge-mini.Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..4e90009
--- /dev/null
+++ b/README.md
@@ -0,0 +1,433 @@
+---
+license: other
+license_name: nope-edge-community-license-v1.0
+license_link: LICENSE.md
+language:
+  - en
+tags:
+  - safety
+  - crisis-detection
+  - text-classification
+  - mental-health
+  - content-safety
+  - suicide-prevention
+base_model: nopenet/nope-edge-mini
+pipeline_tag: text-generation
+library_name: transformers
+extra_gated_heading: "Access NOPE Edge"
+extra_gated_description: "This model is available for **research, academic, nonprofit, and evaluation use**. Commercial production use requires a separate license. Please read the [license terms below](#nope-edge-community-license-v10) before downloading."
+extra_gated_button_content: "Agree and download"
+extra_gated_fields:
+  I am using this for research, academic, nonprofit, personal, or evaluation purposes:
+    type: checkbox
+  I agree to the NOPE Edge Community License v1.0:
+    type: checkbox
+---
+
+# GGUF Files for nope-edge-mini
+
+These are the GGUF files for [nopenet/nope-edge-mini](https://huggingface.co/nopenet/nope-edge-mini).
+
+> [!NOTE]
+> **Note:** This is the **first iteration/revision** of this model. A revision is made when a model repo gets updated with a new model.
+>
+> [[second iteration (2)](https://huggingface.co/Flexan/nopenet-nope-edge-mini-GGUF-i2)]
+
+## Downloads
+
+| GGUF Link | Quantization | Description |
+| ---- | ----- | ----------- |
+| [Download](https://huggingface.co/Flexan/nopenet-nope-edge-mini-GGUF-i1/resolve/main/nope-edge-mini.Q2_K.gguf) | Q2_K | Lowest quality |
+| [Download](https://huggingface.co/Flexan/nopenet-nope-edge-mini-GGUF-i1/resolve/main/nope-edge-mini.IQ3_XS.gguf) | IQ3_XS | i-quant |
+| [Download](https://huggingface.co/Flexan/nopenet-nope-edge-mini-GGUF-i1/resolve/main/nope-edge-mini.Q3_K_S.gguf) | Q3_K_S | |
+| [Download](https://huggingface.co/Flexan/nopenet-nope-edge-mini-GGUF-i1/resolve/main/nope-edge-mini.IQ3_S.gguf) | IQ3_S | i-quant, preferable to Q3_K_S |
+| [Download](https://huggingface.co/Flexan/nopenet-nope-edge-mini-GGUF-i1/resolve/main/nope-edge-mini.IQ3_M.gguf) | IQ3_M | i-quant |
+| [Download](https://huggingface.co/Flexan/nopenet-nope-edge-mini-GGUF-i1/resolve/main/nope-edge-mini.Q3_K_M.gguf) | Q3_K_M | |
+| [Download](https://huggingface.co/Flexan/nopenet-nope-edge-mini-GGUF-i1/resolve/main/nope-edge-mini.Q3_K_L.gguf) | Q3_K_L | |
+| [Download](https://huggingface.co/Flexan/nopenet-nope-edge-mini-GGUF-i1/resolve/main/nope-edge-mini.IQ4_XS.gguf) | IQ4_XS | i-quant |
+| [Download](https://huggingface.co/Flexan/nopenet-nope-edge-mini-GGUF-i1/resolve/main/nope-edge-mini.Q4_K_S.gguf) | Q4_K_S | Fast with good performance |
+| [Download](https://huggingface.co/Flexan/nopenet-nope-edge-mini-GGUF-i1/resolve/main/nope-edge-mini.Q4_K_M.gguf) | Q4_K_M | **Recommended:** good balance of speed and quality |
+| [Download](https://huggingface.co/Flexan/nopenet-nope-edge-mini-GGUF-i1/resolve/main/nope-edge-mini.Q5_K_S.gguf) | Q5_K_S | |
+| [Download](https://huggingface.co/Flexan/nopenet-nope-edge-mini-GGUF-i1/resolve/main/nope-edge-mini.Q5_K_M.gguf) | Q5_K_M | |
+| [Download](https://huggingface.co/Flexan/nopenet-nope-edge-mini-GGUF-i1/resolve/main/nope-edge-mini.Q6_K.gguf) | Q6_K | Very good quality |
+| [Download](https://huggingface.co/Flexan/nopenet-nope-edge-mini-GGUF-i1/resolve/main/nope-edge-mini.Q8_0.gguf) | Q8_0 | Best quality among the quants |
+| [Download](https://huggingface.co/Flexan/nopenet-nope-edge-mini-GGUF-i1/resolve/main/nope-edge-mini.f16.gguf) | f16 | 16-bit half precision (largest file); don't bother, use a quant |
+
+## Note from Flexan
+
+I provide GGUFs and quantizations of publicly available models that do not yet have a GGUF equivalent.
+This process is not yet automated, and I download, convert, quantize, and upload them **by hand**, usually for models **I deem interesting and wish to try out**.
+
+If a quant you'd like is missing, you can request it in the community tab.
+If you want a public model to be converted, you can request that in the community tab as well.
+If you have questions about the model itself, please refer to the original model repo.
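+
+## Running the GGUFs
+
+These files should work with any llama.cpp-based runtime. Below is a minimal sketch using the `llama-cpp-python` bindings (an assumed extra dependency, installed with `pip install llama-cpp-python huggingface_hub`, not something this repo ships); it pulls one quant from this repo and relies on the chat template embedded in the GGUF:
+
+```python
+from llama_cpp import Llama
+
+# Downloads the chosen quant from this repo on first use (cached afterwards).
+llm = Llama.from_pretrained(
+    repo_id="Flexan/nopenet-nope-edge-mini-GGUF-i1",
+    filename="nope-edge-mini.Q4_K_M.gguf",
+    n_ctx=2048,
+)
+
+out = llm.create_chat_completion(
+    messages=[{"role": "user", "content": "I want to end it all"}],
+    max_tokens=30,
+    temperature=0,
+)
+# Should print something like "suicide|high|self" (see Output Format below)
+print(out["choices"][0]["message"]["content"].strip())
+```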
+
+# NOPE Edge Mini - Crisis Classification Model
+
+A fine-tuned model for detecting crisis signals in text - suicidal ideation, self-harm, abuse, violence, and other safety-critical content. Designed for integration into safety pipelines, content moderation systems, and mental health applications.
+
+> **License:** [NOPE Edge Community License v1.0](LICENSE.md) - Free for research, academic, nonprofit, and evaluation use. Commercial production requires a separate license. See [nope.net/edge](https://nope.net/edge) for details.
+
+---
+
+## Model Variants
+
+| Model | Parameters | Accuracy | Latency | Use Case |
+|-------|------------|----------|---------|----------|
+| **[nope-edge](https://huggingface.co/nopenet/nope-edge)** | 4B | **90.6%** | ~750ms | Maximum accuracy |
+| **[nope-edge-mini](https://huggingface.co/nopenet/nope-edge-mini)** | 1.7B | 85.9% | ~260ms | High-volume, cost-sensitive |
+
+This is **nope-edge-mini (1.7B)**.
+
+---
+
+## Quick Start
+
+### Requirements
+
+- Python 3.10+
+- GPU with 4GB+ VRAM (e.g., RTX 3060, T4, L4) - or CPU (slower)
+- ~4GB disk space
+
+```bash
+pip install torch transformers accelerate
+```
+
+### Usage
+
+```python
+from transformers import AutoModelForCausalLM, AutoTokenizer
+import torch
+
+model_id = "nopenet/nope-edge-mini"
+
+tokenizer = AutoTokenizer.from_pretrained(model_id)
+model = AutoModelForCausalLM.from_pretrained(
+    model_id,
+    torch_dtype=torch.bfloat16,
+    device_map="auto"
+)
+
+def classify(message: str) -> str:
+    """Returns 'type|severity|subject' or 'none'."""
+    input_ids = tokenizer.apply_chat_template(
+        [{"role": "user", "content": message}],
+        tokenize=True,
+        return_tensors="pt",
+        add_generation_prompt=True
+    ).to(model.device)
+
+    with torch.no_grad():
+        output = model.generate(input_ids, max_new_tokens=30, do_sample=False)
+
+    return tokenizer.decode(
+        output[0][input_ids.shape[1]:],
+        skip_special_tokens=True
+    ).strip()
+
+classify("I want to end it all")                      # -> "suicide|high|self"
+classify("Great day at work!")                        # -> "none"
+classify("My friend said she wants to kill herself")  # -> "suicide|high|other"
+```
+
+---
+
+## Output Format
+
+**Crisis detected:**
+```
+{type}|{severity}|{subject}
+```
+
+| Field | Values | Description |
+|-------|--------|-------------|
+| type | `suicide`, `self_harm`, `self_neglect`, `violence`, `abuse`, `sexual_violence`, `exploitation`, `stalking`, `neglect` | Risk category |
+| severity | `mild`, `moderate`, `high`, `critical` | Urgency level |
+| subject | `self`, `other` | Who is at risk |
+
+**No crisis:** `none`
+
+### Subject Attribution
+
+| Subject | Meaning | Example |
+|---------|---------|---------|
+| `self` | The speaker is at risk or is the victim | "I want to kill myself", "My partner hits me" |
+| `other` | The speaker is reporting concern about someone else | "My friend said she wants to die" |
+
+### Parsing Example
+
+```python
+def parse_output(output: str) -> dict:
+    output = output.strip().lower()
+    if output == "none":
+        return {"is_crisis": False}
+
+    parts = output.split("|")
+    return {
+        "is_crisis": True,
+        "type": parts[0] if len(parts) > 0 else None,
+        "severity": parts[1] if len(parts) > 1 else None,
+        "subject": parts[2] if len(parts) > 2 else None,
+    }
+```
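+
+For example, chaining `classify` and `parse_output` yields a dict you can route on. The severity ranking and the review handler below are hypothetical glue code for illustration, not part of the model's API:
+
+```python
+# Hypothetical routing sketch: escalate anything rated "high" or "critical".
+SEVERITY_RANK = {"mild": 0, "moderate": 1, "high": 2, "critical": 3}
+
+result = parse_output(classify("My friend said she wants to kill herself"))
+# -> {"is_crisis": True, "type": "suicide", "severity": "high", "subject": "other"}
+
+if result["is_crisis"] and SEVERITY_RANK.get(result["severity"], 0) >= 2:
+    flag_for_human_review(result)  # hypothetical hook into your review queue
+```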
+
+---
+
+## Input Best Practices
+
+### Text Preprocessing
+
+**Preserve natural prose.** The model was trained on real conversations with authentic expression. Emotional signals matter:
+
+| Keep | Why |
+|------|-----|
+| Emojis | `💀` in "kms 💀" signals irony; `😭` signals distress intensity |
+| Punctuation intensity | "I can't do this!!!" conveys more urgency than "I can't do this" |
+| Casual spelling | "im so done" vs "I'm so done" — both valid, don't normalize |
+| Slang/algospeak | "kms", "unalive", "catch the bus" — model understands these |
+
+**Only remove:**
+
+| Remove | Example |
+|--------|---------|
+| Zero-width/invisible Unicode | `hello\u200bworld` → `helloworld` |
+| Decorative Unicode fonts | `ℐ 𝓌𝒶𝓃𝓉 𝓉𝑜 𝒹𝒾𝑒` → `I want to die` |
+| Newlines (single messages) | `I can't\ndo this` → `I can't do this` |
+
+**Keep newlines** when they provide turn structure (see Multi-Turn Conversations below).
+
+**Examples:**
+
+```python
+# KEEP - emotional signal matters
+"I can't do this anymore 😭😭😭"  # Keep emojis - signals distress
+"i want to die!!!!!!!"           # Keep punctuation - signals intensity
+"kms lmao 💀"                    # Keep all - irony/context signal
+
+# NORMALIZE - only structural/invisible issues
+"ℐ 𝓌𝒶𝓃𝓉 𝓉𝑜 𝒹𝒾𝑒" → "I want to die"                      # Fancy Unicode fonts
+"I can't\ndo this\nanymore" → "I can't do this anymore"  # Single message
+"hello\u200bworld" → "helloworld"                        # Zero-width chars
+```
+
+**Minimal preprocessing function:**
+
+```python
+import re
+import unicodedata
+
+def preprocess(text: str) -> str:
+    # Normalize decorative Unicode fonts to ASCII (NFKC)
+    text = unicodedata.normalize('NFKC', text)
+
+    # Remove zero-width and invisible characters
+    text = re.sub(r'[\u200b-\u200f\u2028-\u202f\u2060-\u206f\ufeff]', '', text)
+
+    # Flatten newlines to spaces (for single messages only)
+    text = re.sub(r'\n+', ' ', text)
+
+    # Collapse multiple spaces
+    text = re.sub(r' +', ' ', text)
+
+    return text.strip()
+
+# NOTE: Do NOT remove emojis, punctuation, or "normalize" spelling
+```
+
+**Language considerations:**
+- Model is English-primary but handles multilingual input
+- Keep native scripts (Chinese, Arabic, Korean, etc.) intact
+- Preserve natural punctuation and expression in all languages
+
+### Multi-Turn Conversations
+
+**The model was trained on pre-serialized transcripts, not native multi-turn chat format.**
+
+When classifying conversations, serialize them into a single user message:
+
+```python
+# CORRECT - serialize conversation into single message
+conversation = """User: How are you?
+Assistant: I'm here to help. How are you feeling?
+User: Not great. I've been thinking about ending it all."""
+
+messages = [{"role": "user", "content": conversation}]
+
+# WRONG - don't use multiple role/content pairs
+messages = [
+    {"role": "user", "content": "How are you?"},
+    {"role": "assistant", "content": "I'm here to help..."},
+    {"role": "user", "content": "Not great..."}
+]  # Model was NOT trained this way
+```
+
+**Why serialization matters:**
+- Model treats all content equally (no user/assistant distinction)
+- Trained on pre-serialized transcripts for consistent attention patterns
+- Native multi-turn format causes the model to "chat" instead of classify
+
+**Flexible format - these all work:**
+
+```python
+# Simple newlines
+"User: message 1\nAssistant: message 2\nUser: message 3"
+
+# Markdown-style
+"**User:** message 1\n**Assistant:** message 2"
+
+# Labeled
+"{user}: message 1\n{assistant}: message 2"
+
+# XML-style
+"<user>message 1</user>\n<assistant>message 2</assistant>"
+```
+
+The model is robust to formatting variations. Consistency matters more than the specific format you choose.
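+
+A small helper can apply this serialization (and the sliding window suggested in Input Length below) before classification. The function name, role labels, and window size here are illustrative choices, not part of the model's API:
+
+```python
+def serialize_conversation(messages: list[dict], max_turns: int = 15) -> str:
+    """Flatten an OpenAI-style message list into one labeled transcript."""
+    labels = {"user": "User", "assistant": "Assistant"}
+    recent = messages[-max_turns:]  # sliding window over the latest turns
+    return "\n".join(
+        f"{labels.get(m['role'], m['role'].title())}: {m['content']}" for m in recent
+    )
+
+conversation = serialize_conversation([
+    {"role": "user", "content": "How are you?"},
+    {"role": "assistant", "content": "I'm here to help. How are you feeling?"},
+    {"role": "user", "content": "Not great. I've been thinking about ending it all."},
+])
+classify(conversation)  # one serialized user message, as recommended above
+```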
+
+### Input Length
+
+- **Single messages:** No preprocessing needed beyond character cleanup
+- **Conversations:** For very long conversations (20+ turns), consider classifying a sliding window (the last 10-15 turns):
+  - The model's attention may not span extremely long contexts effectively
+  - Deep needle detection (a crisis buried in turn 3 of 25) is a known limitation
+
+---
+
+## Production Deployment
+
+For high-throughput production use, deploy with vLLM or SGLang:
+
+```bash
+# vLLM
+pip install vllm
+python -m vllm.entrypoints.openai.api_server \
+    --model nopenet/nope-edge-mini \
+    --dtype bfloat16 --max-model-len 2048 --port 8000
+
+# SGLang
+pip install sglang
+python -m sglang.launch_server \
+    --model-path nopenet/nope-edge-mini \
+    --dtype bfloat16 --port 8000
+```
+
+Then call it as an OpenAI-compatible API:
+
+```bash
+curl http://localhost:8000/v1/chat/completions \
+  -H "Content-Type: application/json" \
+  -d '{
+    "model": "nopenet/nope-edge-mini",
+    "messages": [{"role": "user", "content": "I want to end it all"}],
+    "max_tokens": 30, "temperature": 0
+  }'
+```
+
+| Setup | Throughput | Latency (p50) |
+|-------|-----------|---------------|
+| transformers | ~8 req/sec | ~180ms |
+| vLLM / SGLang | 50-100+ req/sec | ~50ms |
+
+---
+
+## Model Details
+
+| | |
+|---|---|
+| **Parameters** | 1.7B |
+| **Precision** | bfloat16 |
+| **Base Model** | Qwen/Qwen3-1.7B |
+| **Method** | LoRA fine-tune, merged to full weights |
+| **License** | [NOPE Edge Community License v1.0](LICENSE.md) |
+
+---
+
+## Risk Types Detected
+
+| Type | Description | Clinical Framework |
+|------|-------------|-------------------|
+| `suicide` | Suicidal ideation, intent, planning | C-SSRS |
+| `self_harm` | Non-suicidal self-injury (NSSI) | - |
+| `self_neglect` | Eating disorders, medical neglect | - |
+| `violence` | Threats/intent to harm others | HCR-20 |
+| `abuse` | Domestic/intimate partner violence | DASH |
+| `sexual_violence` | Rape, sexual assault, coercion | - |
+| `neglect` | Failing to care for a dependent | - |
+| `exploitation` | Trafficking, grooming, sextortion | - |
+| `stalking` | Persistent unwanted contact | SAM |
+
+---
+
+## Important Limitations
+
+- Outputs are **probabilistic signals**, not clinical assessments
+- **False negatives and false positives will occur**
+- Never use as the **sole basis** for intervention decisions
+- Always implement **human review** for flagged content
+- This model is **not** a medical device or a substitute for professional judgment
+- Not validated for all populations, languages, or cultural contexts
+
+---
+
+## Commercial Licensing
+
+This model is free for research, academic, nonprofit, and evaluation use.
+
+**For commercial production deployment**, contact us:
+- Email: support@nope.net
+- Website: https://nope.net/edge
+
+Commercial licenses include:
+- Production deployment rights
+- Priority support
+- Custom fine-tuning options
+- SLA guarantees
+
+---
+
+## About NOPE
+
+NOPE provides safety infrastructure for AI applications. Our API helps developers detect mental health crises and harmful AI behavior in real time.
+
+- **Website:** https://nope.net
+- **Documentation:** https://docs.nope.net
+- **Support:** support@nope.net
+
+---
+
+## NOPE Edge Community License v1.0
+
+Copyright (c) 2026 NopeNet, LLC. All rights reserved.
+
+### Permitted Uses
+
+You may use this Model for:
+
+- **Research and academic purposes** - published or unpublished studies
+- **Personal projects** - non-commercial individual use
+- **Nonprofit organizations** - including crisis lines, mental health organizations, and safety-focused NGOs
+- **Evaluation and development** - testing integration before commercial licensing
+- **Benchmarking** - publishing evaluations with attribution
+
+### Commercial Use
+
+**Commercial use requires a separate license.** Commercial use includes production deployment in revenue-generating products or use by for-profit companies beyond evaluation.
+
+Contact support@nope.net or visit https://nope.net/edge for commercial licensing.
+
+### Restrictions
+
+You may NOT:
+
+- redistribute or share weights
+- sublicense, sell, or transfer the Model
+- create derivative models for redistribution
+- build a competing crisis classification product
+
+### No Warranty
+
+THE MODEL IS PROVIDED "AS IS" WITHOUT WARRANTIES. False negatives and false positives will occur. This is not a medical device or a substitute for professional judgment.
+
+### Limitation of Liability
+
+NopeNet shall not be liable for damages arising from use, including classification errors or harm to any person.
+
+### Base Model
+
+Built on [Qwen3](https://huggingface.co/Qwen) by Alibaba Cloud (Apache 2.0). See NOTICE.md.
\ No newline at end of file
diff --git a/nope-edge-mini.IQ3_M.gguf b/nope-edge-mini.IQ3_M.gguf
new file mode 100644
index 0000000..5ecbf66
--- /dev/null
+++ b/nope-edge-mini.IQ3_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6304a880addaa0871e9cc2774d0a775be225c092a851ddc52c082ebc298d0b39
+size 1029366208
diff --git a/nope-edge-mini.IQ3_S.gguf b/nope-edge-mini.IQ3_S.gguf
new file mode 100644
index 0000000..7e50535
--- /dev/null
+++ b/nope-edge-mini.IQ3_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4993e17722e221c251a35ce2333af4a38b69b3e89bf9cdd28eaaf600f02377f3
+size 1000956352
diff --git a/nope-edge-mini.IQ3_XS.gguf b/nope-edge-mini.IQ3_XS.gguf
new file mode 100644
index 0000000..9edbe5e
--- /dev/null
+++ b/nope-edge-mini.IQ3_XS.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b98bcfbabbaf621b44483882e5901c683ca025cf310b6589c84aee3466a2b75a
+size 967926208
diff --git a/nope-edge-mini.IQ4_XS.gguf b/nope-edge-mini.IQ4_XS.gguf
new file mode 100644
index 0000000..9c65795
--- /dev/null
+++ b/nope-edge-mini.IQ4_XS.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8541fb26f11f0193f9db6e150430cdee24d27ffda60936916bec6796947c6106
+size 1181587904
diff --git a/nope-edge-mini.Q2_K.gguf b/nope-edge-mini.Q2_K.gguf
new file mode 100644
index 0000000..ea951a6
--- /dev/null
+++ b/nope-edge-mini.Q2_K.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:22d7404ff638f71033eaa08e1d695eb72cb55cfdc4bd0206205ccbdffc33f033
+size 879897024
diff --git a/nope-edge-mini.Q3_K_L.gguf b/nope-edge-mini.Q3_K_L.gguf
new file mode 100644
index 0000000..284cade
--- /dev/null
+++ b/nope-edge-mini.Q3_K_L.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bc6d33216f895e4782d7a7b462ec2779cbc4d7e623d2b34cfcb8df2d65bf13fc
+size 1137205696
diff --git a/nope-edge-mini.Q3_K_M.gguf b/nope-edge-mini.Q3_K_M.gguf
new file mode 100644
index 0000000..fd21df3
--- /dev/null
+++ b/nope-edge-mini.Q3_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6754f84b068a8bc61ba501da3c197176eaf35260345f71e6379e6b1e0c5db552
+size 1073242560
diff --git a/nope-edge-mini.Q3_K_S.gguf b/nope-edge-mini.Q3_K_S.gguf
new file mode 100644
index 0000000..a1aa8d5
--- /dev/null
+++ b/nope-edge-mini.Q3_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7e7b6ead2628301b80a66648bd8b8701ae68aad10c259dfcefdc40afa5439d89
+size 1000956352
diff --git a/nope-edge-mini.Q4_K_M.gguf b/nope-edge-mini.Q4_K_M.gguf
new file mode 100644
index 0000000..b5f0ad5
--- /dev/null
+++ b/nope-edge-mini.Q4_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c3430b4e86100f3c75d2d3abfe6fd0b09e83302ea6fdee8e98fb1796db07a803
+size 1282439616
diff --git a/nope-edge-mini.Q4_K_S.gguf b/nope-edge-mini.Q4_K_S.gguf
new file mode 100644
index 0000000..d2842ad
--- /dev/null
+++ b/nope-edge-mini.Q4_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3356766f166579f75538f3fa6f2f7193ee30903c9478e08e8f16246fcd3967b1
+size 1235220928
diff --git a/nope-edge-mini.Q5_K_M.gguf b/nope-edge-mini.Q5_K_M.gguf
new file mode 100644
index 0000000..171c7bd
--- /dev/null
+++ b/nope-edge-mini.Q5_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:35a67c2f60d33b912c90ceaf29509bdbad8f297c412e3a8271fbc8b3c2182ca3
+size 1471805888
diff --git a/nope-edge-mini.Q5_K_S.gguf b/nope-edge-mini.Q5_K_S.gguf
new file mode 100644
index 0000000..a41ba3c
--- /dev/null
+++ b/nope-edge-mini.Q5_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c794ebba634cc4970214542c61215f899fbb036be8439334763cf129a6cd5e00
+size 1444510144
diff --git a/nope-edge-mini.Q6_K.gguf b/nope-edge-mini.Q6_K.gguf
new file mode 100644
index 0000000..25f027a
--- /dev/null
+++ b/nope-edge-mini.Q6_K.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:55c71b5ad5335b0bbeff7a0055650a7ecdbc5c776db9d6caebc667a0ca9b57c7
+size 1673007552
diff --git a/nope-edge-mini.Q8_0.gguf b/nope-edge-mini.Q8_0.gguf
new file mode 100644
index 0000000..10a7635
--- /dev/null
+++ b/nope-edge-mini.Q8_0.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:076556537c1e0f845774a2ee4ea41f1fb6659334ac92cd9bf0d67c0166222bf2
+size 2165039552
diff --git a/nope-edge-mini.f16.gguf b/nope-edge-mini.f16.gguf
new file mode 100644
index 0000000..0eeb15b
--- /dev/null
+++ b/nope-edge-mini.f16.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:61bb40ac0d049cd66705a2b30d2f224072c66daabd57ae2a54745c0b10ec73cc
+size 4069679552