diff --git a/.gitattributes b/.gitattributes
index 53d7257..07d7a83 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1,47 +1,49 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
-*.bin.* filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
-*.zstandard filter=lfs diff=lfs merge=lfs -text
-*.tfevents* filter=lfs diff=lfs merge=lfs -text
-*.db* filter=lfs diff=lfs merge=lfs -text
-*.ark* filter=lfs diff=lfs merge=lfs -text
-**/*ckpt*data* filter=lfs diff=lfs merge=lfs -text
-**/*ckpt*.meta filter=lfs diff=lfs merge=lfs -text
-**/*ckpt*.index filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.gguf* filter=lfs diff=lfs merge=lfs -text
-*.ggml filter=lfs diff=lfs merge=lfs -text
-*.llamafile* filter=lfs diff=lfs merge=lfs -text
-*.pt2 filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
\ No newline at end of file
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+Dolphin3.0-Qwen2.5-3b.Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+Dolphin3.0-Qwen2.5-3b.Q4_1.gguf filter=lfs diff=lfs merge=lfs -text
+Dolphin3.0-Qwen2.5-3b.Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
+Dolphin3.0-Qwen2.5-3b.Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
+Dolphin3.0-Qwen2.5-3b.Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
+Dolphin3.0-Qwen2.5-3b.Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+Dolphin3.0-Qwen2.5-3b.Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+Dolphin3.0-Qwen2.5-3b.Q5_0.gguf filter=lfs diff=lfs merge=lfs -text
+Dolphin3.0-Qwen2.5-3b.Q5_1.gguf filter=lfs diff=lfs merge=lfs -text
+Dolphin3.0-Qwen2.5-3b.Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
+Dolphin3.0-Qwen2.5-3b.Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+Dolphin3.0-Qwen2.5-3b.Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+Dolphin3.0-Qwen2.5-3b.Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+Dolphin3.0-Qwen2.5-3b.Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
diff --git a/Dolphin3.0-Qwen2.5-3b.Q2_K.gguf b/Dolphin3.0-Qwen2.5-3b.Q2_K.gguf
new file mode 100644
index 0000000..b4108f2
--- /dev/null
+++ b/Dolphin3.0-Qwen2.5-3b.Q2_K.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0bc069602a60ec2bcd134a8f65b19f3d1249821ae2b080e7ec61634ec1127327
+size 1376859904
diff --git a/Dolphin3.0-Qwen2.5-3b.Q3_K_L.gguf b/Dolphin3.0-Qwen2.5-3b.Q3_K_L.gguf
new file mode 100644
index 0000000..6bb9717
--- /dev/null
+++ b/Dolphin3.0-Qwen2.5-3b.Q3_K_L.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:37f85651b9eeffd7ed5ae2c8abb7cf8d03ce3156f714ea59a33e1b78764091cc
+size 1841098496
diff --git a/Dolphin3.0-Qwen2.5-3b.Q3_K_M.gguf b/Dolphin3.0-Qwen2.5-3b.Q3_K_M.gguf
new file mode 100644
index 0000000..0aa909c
--- /dev/null
+++ b/Dolphin3.0-Qwen2.5-3b.Q3_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:88b5a8c702072f1349f0812af51a16b23cda3b1a3c97260bd5456e670b96f26d
+size 1724182272
diff --git a/Dolphin3.0-Qwen2.5-3b.Q3_K_S.gguf b/Dolphin3.0-Qwen2.5-3b.Q3_K_S.gguf
new file mode 100644
index 0000000..32a11a4
--- /dev/null
+++ b/Dolphin3.0-Qwen2.5-3b.Q3_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a2035a20f2e9ed7d5120a4368485ba9e9d9cedab1426ef411e844c7d9f8b2a0f
+size 1588064000
diff --git a/Dolphin3.0-Qwen2.5-3b.Q4_0.gguf b/Dolphin3.0-Qwen2.5-3b.Q4_0.gguf
new file mode 100644
index 0000000..07d5f05
--- /dev/null
+++ b/Dolphin3.0-Qwen2.5-3b.Q4_0.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c516e6eb0379e5fae430e187be69ce629c0ac47c9b565f1027eab26227e300eb
+size 1997883136
diff --git a/Dolphin3.0-Qwen2.5-3b.Q4_1.gguf b/Dolphin3.0-Qwen2.5-3b.Q4_1.gguf
new file mode 100644
index 0000000..ba273c7
--- /dev/null
+++ b/Dolphin3.0-Qwen2.5-3b.Q4_1.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:025264efc97a81868a288d281b5e353a354a64d5e91be7143fe7656177545b8d
+size 2190739200
diff --git a/Dolphin3.0-Qwen2.5-3b.Q4_K_M.gguf b/Dolphin3.0-Qwen2.5-3b.Q4_K_M.gguf
new file mode 100644
index 0000000..c11154e
--- /dev/null
+++ b/Dolphin3.0-Qwen2.5-3b.Q4_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5344d1ddf6ea632d2aabd45d357de10de706f091bf8454557112086072d5af83
+size 2104936192
diff --git a/Dolphin3.0-Qwen2.5-3b.Q4_K_S.gguf b/Dolphin3.0-Qwen2.5-3b.Q4_K_S.gguf
new file mode 100644
index 0000000..a0f7582
--- /dev/null
+++ b/Dolphin3.0-Qwen2.5-3b.Q4_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:54c7190015c96aa89b285575c4fa10bfc46cc95af0b5fa847eec57f2fd2ec960
+size 2009417472
diff --git a/Dolphin3.0-Qwen2.5-3b.Q5_0.gguf b/Dolphin3.0-Qwen2.5-3b.Q5_0.gguf
new file mode 100644
index 0000000..1d23ca2
--- /dev/null
+++ b/Dolphin3.0-Qwen2.5-3b.Q5_0.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:edec052b6627b350779fa384c0b8542e4d9feeea03e39edaf88ef22c953c333b
+size 2383595264
diff --git a/Dolphin3.0-Qwen2.5-3b.Q5_1.gguf b/Dolphin3.0-Qwen2.5-3b.Q5_1.gguf
new file mode 100644
index 0000000..90c652e
--- /dev/null
+++ b/Dolphin3.0-Qwen2.5-3b.Q5_1.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:11e044d385b662149c0cccf5443709e6bda5bf3d3cede67e07389517a88001dd
+size 2576451328
diff --git a/Dolphin3.0-Qwen2.5-3b.Q5_K_M.gguf b/Dolphin3.0-Qwen2.5-3b.Q5_K_M.gguf
new file mode 100644
index 0000000..59f3cd0
--- /dev/null
+++ b/Dolphin3.0-Qwen2.5-3b.Q5_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:00ac343390595521fadd44c60141ede65f6a4311d81a4d8a99ef673f536cccd9
+size 2438743808
diff --git a/Dolphin3.0-Qwen2.5-3b.Q5_K_S.gguf b/Dolphin3.0-Qwen2.5-3b.Q5_K_S.gguf
new file mode 100644
index 0000000..7f4f6ba
--- /dev/null
+++ b/Dolphin3.0-Qwen2.5-3b.Q5_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f7128c15115689b6a9ae465f12d45a7f15115904ffd162d07568fb519460678e
+size 2383595264
diff --git a/Dolphin3.0-Qwen2.5-3b.Q6_K.gguf b/Dolphin3.0-Qwen2.5-3b.Q6_K.gguf
new file mode 100644
index 0000000..65d8145
--- /dev/null
+++ b/Dolphin3.0-Qwen2.5-3b.Q6_K.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f9815be2f9e3736657475522d0bbbf29bc6289c64cc4fd7bb62c1babce4c4f30
+size 2793414400
diff --git a/Dolphin3.0-Qwen2.5-3b.Q8_0.gguf b/Dolphin3.0-Qwen2.5-3b.Q8_0.gguf
new file mode 100644
index 0000000..98cf69e
--- /dev/null
+++ b/Dolphin3.0-Qwen2.5-3b.Q8_0.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e8fdcb6b9ec16022b02f990c537defcb0f6a4872c20e28758be7e89b04c8217b
+size 3616091904
diff --git a/README.md b/README.md
index c381291..963d3a7 100644
--- a/README.md
+++ b/README.md
@@ -1,47 +1,133 @@
---
-license: Apache License 2.0
-#model-type:
-##如 gpt、phi、llama、chatglm、baichuan 等
-#- gpt
+license: other
+license_name: qwen-research
+license_link: https://huggingface.co/Qwen/Qwen2.5-3B/blob/main/LICENSE
+datasets:
+- OpenCoder-LLM/opc-sft-stage1
+- OpenCoder-LLM/opc-sft-stage2
+- microsoft/orca-agentinstruct-1M-v1
+- microsoft/orca-math-word-problems-200k
+- NousResearch/hermes-function-calling-v1
+- AI-MO/NuminaMath-CoT
+- AI-MO/NuminaMath-TIR
+- allenai/tulu-3-sft-mixture
+- cognitivecomputations/dolphin-coder
+- HuggingFaceTB/smoltalk
+- cognitivecomputations/samantha-data
+- m-a-p/CodeFeedback-Filtered-Instruction
+- m-a-p/Code-Feedback
+language:
+- en
+base_model:
+- Qwen/Qwen2.5-3B
-#domain:
-##如 nlp、cv、audio、multi-modal
-#- nlp
-
-#language:
-##语言代码列表 https://help.aliyun.com/document_detail/215387.html?spm=a2c4g.11186623.0.0.9f8d7467kni6Aa
-#- cn
-
-#metrics:
-##如 CIDEr、Blue、ROUGE 等
-#- CIDEr
-
-#tags:
-##各种自定义,包括 pretrained、fine-tuned、instruction-tuned、RL-tuned 等训练方法和其他
-#- pretrained
-
-#tools:
-##如 vllm、fastchat、llamacpp、AdaSeq 等
-#- vllm
---
-### 当前模型的贡献者未提供更加详细的模型介绍。模型文件和权重,可浏览“模型文件”页面获取。
-#### 您可以通过如下git clone命令,或者ModelScope SDK来下载模型
-SDK下载
-```bash
-#安装ModelScope
-pip install modelscope
+[](https://hf.co/QuantFactory)
+
+
+# QuantFactory/Dolphin3.0-Qwen2.5-3b-GGUF
+This is quantized version of [cognitivecomputations/Dolphin3.0-Qwen2.5-3b](https://huggingface.co/cognitivecomputations/Dolphin3.0-Qwen2.5-3b) created using llama.cpp
+
+# Original Model Card
+
+
+# Dolphin 3.0 Qwen 2.5 3B 🐬
+Part of the [Dolphin 3.0 Collection](https://huggingface.co/collections/cognitivecomputations/dolphin-30-677ab47f73d7ff66743979a3)
+
+Curated and trained by [Eric Hartford](https://huggingface.co/ehartford), [Ben Gitter](https://huggingface.co/bigstorm), [BlouseJury](https://huggingface.co/BlouseJury) and [Cognitive Computations](https://huggingface.co/cognitivecomputations)
+
+[](https://discord.gg/cognitivecomputations)
+Discord: https://discord.gg/cognitivecomputations
+
+
+
+## Sponsors
+Our appreciation for the generous sponsors of Dolphin 3.0:
+- [Crusoe Cloud](https://crusoe.ai/) - provided 16x L40s for training and evals
+- [Akash](https://akash.network/) - provided on-demand 8x H100 for training
+- [Lazarus](https://www.lazarusai.com/) - provided 16x H100 for training
+- [Cerebras](https://cerebras.ai/) - provided excellent and fast inference services for data labeling
+- [Andreessen Horowitz](https://a16z.com/) - provided a [grant](https://a16z.com/supporting-the-open-source-ai-community/) that made Dolphin 1.0 possible and enabled me to bootstrap my homelab
+
+## What is Dolphin?
+
+Dolphin 3.0 is the next generation of the Dolphin series of instruct-tuned models. Designed to be the ultimate general purpose local model, enabling coding, math, agentic, function calling, and general use cases.
+
+Dolphin aims to be a general purpose model, similar to the models behind ChatGPT, Claude, Gemini. But these models present problems for businesses seeking to include AI in their products.
+1) They maintain control of the system prompt, deprecating and changing things as they wish, often causing software to break.
+2) They maintain control of the model versions, sometimes changing things silently, or deprecating older models that your business relies on.
+3) They maintain control of the alignment, and in particular the alignment is one-size-fits all, not tailored to the application.
+4) They can see all your queries and they can potentially use that data in ways you wouldn't want.
+Dolphin, in contrast, is steerable and gives control to the system owner. You set the system prompt. You decide the alignment. You have control of your data. Dolphin does not impose its ethics or guidelines on you. You are the one who decides the guidelines.
+
+Dolphin belongs to YOU, it is your tool, an extension of your will.
+Just as you are personally responsible for what you do with a knife, gun, fire, car, or the internet, you are the creator and originator of any content you generate with Dolphin.
+
+https://erichartford.com/uncensored-models
+
+## Chat Template
+
+We use ChatML for the chat template.
+
```
-```python
-#SDK模型下载
-from modelscope import snapshot_download
-model_dir = snapshot_download('QuantFactory/Dolphin3.0-Qwen2.5-3b-GGUF')
-```
-Git下载
-```
-#Git模型下载
-git clone https://www.modelscope.cn/QuantFactory/Dolphin3.0-Qwen2.5-3b-GGUF.git
+<|im_start|>system
+You are Dolphin, a helpful AI assistant.<|im_end|>
+<|im_start|>user
+{prompt}<|im_end|>
+<|im_start|>assistant
```
-
如果您是本模型的贡献者,我们邀请您根据模型贡献文档,及时完善模型卡片内容。
\ No newline at end of file
+## System Prompt
+
+In Dolphin, the system prompt is what you use to set the tone and alignment of the responses. You can set a character, a mood, rules for its behavior, and it will try its best to follow them.
+
+Make sure to set the system prompt in order to set the tone and guidelines for the responses - Otherwise, it will act in a default way that might not be what you want.
+
+Example use of system prompt:
+
+```
+<|im_start|>system
+You are Dolphin, a golang coding assistant. you only code in golang. If the user requests any other programming language, return the solution in golang instead.<|im_end|>
+<|im_start|>user
+Please implement A* using python<|im_end|>
+<|im_start|>assistant
+```
+
+## Sample Outputs
+
+TBD
+
+## How to use
+
+There are many ways to use a huggingface model including:
+- ollama
+- LM Studio
+- Huggingface Transformers library
+- vllm
+- sglang
+- tgi
+
+## Evals
+
+TBD
+
+## Appreciation
+
+Respect and thanks to the creators of the open source datasets that were used:
+- [OpenCoder-LLM](https://huggingface.co/OpenCoder-LLM) (opc-sft-stage1, opc-sft-stage2)
+- [microsoft](https://huggingface.co/microsoft) (orca-agentinstruct-1M-v1, orca-math-word-problems-200k)
+- [NousResearch](https://huggingface.co/NousResearch) (hermes-function-calling-v1)
+- [AI-MO](https://huggingface.co/AI-MO) (NuminaMath-CoT, NuminaMath-TIR)
+- [allenai](https://huggingface.co/allenai) (tulu-3-sft-mixture)
+- [HuggingFaceTB](https://huggingface.co/HuggingFaceTB) (smoltalk)
+- [m-a-p](https://huggingface.co/m-a-p) (CodeFeedback-Filtered-Instruction, Code-Feedback)
+
+Special thanks to
+- Meta, Qwen, and OpenCoder, who wrote papers and published models that were instrumental in creating Dolphin 3.0.
+- [RLHFlow](https://huggingface.co/RLHFlow) for the excellent reward model used to filter the datasets
+- Deepseek, for the ridiculously fast Deepseek-V3 that we used to augment the data.
+
+
+
diff --git a/configuration.json b/configuration.json
new file mode 100644
index 0000000..159097f
--- /dev/null
+++ b/configuration.json
@@ -0,0 +1 @@
+{"framework": "pytorch", "task": "others", "allow_remote": true}
\ No newline at end of file
+ + + diff --git a/configuration.json b/configuration.json new file mode 100644 index 0000000..159097f --- /dev/null +++ b/configuration.json @@ -0,0 +1 @@ +{"framework": "pytorch", "task": "others", "allow_remote": true} \ No newline at end of file