diff --git a/.gitattributes b/.gitattributes
index b7e9e3a..a3eb2c9 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1,37 +1,49 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
-*.bin.* filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
-*.zstandard filter=lfs diff=lfs merge=lfs -text
-*.tfevents* filter=lfs diff=lfs merge=lfs -text
-*.db* filter=lfs diff=lfs merge=lfs -text
-*.ark* filter=lfs diff=lfs merge=lfs -text
-**/*ckpt*data* filter=lfs diff=lfs merge=lfs -text
-**/*ckpt*.meta filter=lfs diff=lfs merge=lfs -text
-**/*ckpt*.index filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.gguf* filter=lfs diff=lfs merge=lfs -text
-*.ggml filter=lfs diff=lfs merge=lfs -text
-*.llamafile* filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+Qwen2-0.5B-Instruct.Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
+Qwen2-0.5B-Instruct.Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
+Qwen2-0.5B-Instruct.Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
+Qwen2-0.5B-Instruct.Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+Qwen2-0.5B-Instruct.Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+Qwen2-0.5B-Instruct.Q5_0.gguf filter=lfs diff=lfs merge=lfs -text
+Qwen2-0.5B-Instruct.Q5_1.gguf filter=lfs diff=lfs merge=lfs -text
+Qwen2-0.5B-Instruct.Q4_1.gguf filter=lfs diff=lfs merge=lfs -text
+Qwen2-0.5B-Instruct.Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
+Qwen2-0.5B-Instruct.Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+Qwen2-0.5B-Instruct.Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+Qwen2-0.5B-Instruct.Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+Qwen2-0.5B-Instruct.Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+Qwen2-0.5B-Instruct.Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
diff --git a/Qwen2-0.5B-Instruct.Q2_K.gguf b/Qwen2-0.5B-Instruct.Q2_K.gguf
new file mode 100644
index 0000000..705d6d7
--- /dev/null
+++ b/Qwen2-0.5B-Instruct.Q2_K.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:089973c8260b2bbcb5e2317824859cc9565ee8d900a38c255c26ae69aa1bae11
+size 338604448
diff --git a/Qwen2-0.5B-Instruct.Q3_K_L.gguf b/Qwen2-0.5B-Instruct.Q3_K_L.gguf
new file mode 100644
index 0000000..93e95d2
--- /dev/null
+++ b/Qwen2-0.5B-Instruct.Q3_K_L.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a666242297cfbfc1ae195c7c5e7a479bfd5299d31fc8667060c8e211e0875fc9
+size 369355168
diff --git a/Qwen2-0.5B-Instruct.Q3_K_M.gguf b/Qwen2-0.5B-Instruct.Q3_K_M.gguf
new file mode 100644
index 0000000..634d9f5
--- /dev/null
+++ b/Qwen2-0.5B-Instruct.Q3_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8da80cb7bb130bd2268a7045d9960ad8878f9842e1296d6e0db81dda1c7fea27
+size 355463584
diff --git a/Qwen2-0.5B-Instruct.Q3_K_S.gguf b/Qwen2-0.5B-Instruct.Q3_K_S.gguf
new file mode 100644
index 0000000..de95fe9
--- /dev/null
+++ b/Qwen2-0.5B-Instruct.Q3_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4bd4873cf115a5b0fb83fb0c1996c13582ee0acd928c4a62f2f3f542120c4c7b
+size 338260384
diff --git a/Qwen2-0.5B-Instruct.Q4_0.gguf b/Qwen2-0.5B-Instruct.Q4_0.gguf
new file mode 100644
index 0000000..166ede1
--- /dev/null
+++ b/Qwen2-0.5B-Instruct.Q4_0.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1ff925c3a2fa2947e5bcb36656c8f7e4f3e7bd619bc05d7b96ceb1aa8682ef20
+size 352151968
diff --git a/Qwen2-0.5B-Instruct.Q4_1.gguf b/Qwen2-0.5B-Instruct.Q4_1.gguf
new file mode 100644
index 0000000..356135b
--- /dev/null
+++ b/Qwen2-0.5B-Instruct.Q4_1.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fe3e3de96f59a32d8851b5c5f944e0e271ac76236b8222c0c4d55b3f44236cab
+size 374516128
diff --git a/Qwen2-0.5B-Instruct.Q4_K_M.gguf b/Qwen2-0.5B-Instruct.Q4_K_M.gguf
new file mode 100644
index 0000000..4dd5c38
--- /dev/null
+++ b/Qwen2-0.5B-Instruct.Q4_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5a87836c67fa946a7575f3f761467d5ce28e53e067c57e5749ca86d2e2a630de
+size 397804960
diff --git a/Qwen2-0.5B-Instruct.Q4_K_S.gguf b/Qwen2-0.5B-Instruct.Q4_K_S.gguf
new file mode 100644
index 0000000..3245603
--- /dev/null
+++ b/Qwen2-0.5B-Instruct.Q4_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9ef79c98caf255b58c2351c4f189fbc7e05d0673c83dc13d378330f2b332a016
+size 385468832
diff --git a/Qwen2-0.5B-Instruct.Q5_0.gguf b/Qwen2-0.5B-Instruct.Q5_0.gguf
new file mode 100644
index 0000000..91c5c16
--- /dev/null
+++ b/Qwen2-0.5B-Instruct.Q5_0.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:51094ab0dbcf121dbad464bf50db7f72267de3f0ef6040cbefddd811c07dbb14
+size 396880288
diff --git a/Qwen2-0.5B-Instruct.Q5_1.gguf b/Qwen2-0.5B-Instruct.Q5_1.gguf
new file mode 100644
index 0000000..52355c9
--- /dev/null
+++ b/Qwen2-0.5B-Instruct.Q5_1.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d51d3cbb69769d8b3cd9b5264133dc756f4468178232b5ad3fa222466b5f32fd
+size 419244448
diff --git a/Qwen2-0.5B-Instruct.Q5_K_M.gguf b/Qwen2-0.5B-Instruct.Q5_K_M.gguf
new file mode 100644
index 0000000..086834e
--- /dev/null
+++ b/Qwen2-0.5B-Instruct.Q5_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2c599745c8e3ac4a4e2834ad38294dc08bc5fcaf5208167a022230e0a8a817be
+size 420083104
diff --git a/Qwen2-0.5B-Instruct.Q5_K_S.gguf b/Qwen2-0.5B-Instruct.Q5_K_S.gguf
new file mode 100644
index 0000000..8801609
--- /dev/null
+++ b/Qwen2-0.5B-Instruct.Q5_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae21357c40bea658fdafab64303bb9f439e3a9f0dabbdaf61c6adf9c86212feb
+size 412707232
diff --git a/Qwen2-0.5B-Instruct.Q6_K.gguf b/Qwen2-0.5B-Instruct.Q6_K.gguf
new file mode 100644
index 0000000..c28deec
--- /dev/null
+++ b/Qwen2-0.5B-Instruct.Q6_K.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7acce777a5b0104e5b40f9bd0cee9b1a6394477f687ba8bc0be4b47f143c372d
+size 505733536
diff --git a/Qwen2-0.5B-Instruct.Q8_0.gguf b/Qwen2-0.5B-Instruct.Q8_0.gguf
new file mode 100644
index 0000000..4b21364
--- /dev/null
+++ b/Qwen2-0.5B-Instruct.Q8_0.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4bb911a69d0f2dd815a1afef0695256373d9a77f247bdf19460a0ce2d28df263
+size 531065248
diff --git a/README.md b/README.md
index 044ca00..210f4ff 100644
--- a/README.md
+++ b/README.md
@@ -1,47 +1,95 @@
---
-license: Apache License 2.0
-
-#model-type:
-##如 gpt、phi、llama、chatglm、baichuan 等
-#- gpt
-
-#domain:
-##如 nlp、cv、audio、multi-modal
-#- nlp
-
-#language:
-##语言代码列表 https://help.aliyun.com/document_detail/215387.html?spm=a2c4g.11186623.0.0.9f8d7467kni6Aa
-#- cn
-
-#metrics:
-##如 CIDEr、Blue、ROUGE 等
-#- CIDEr
-
-#tags:
-##各种自定义,包括 pretrained、fine-tuned、instruction-tuned、RL-tuned 等训练方法和其他
-#- pretrained
-
-#tools:
-##如 vllm、fastchat、llamacpp、AdaSeq 等
-#- vllm
+license: apache-2.0
+language:
+- en
+pipeline_tag: text-generation
+tags:
+- chat
+base_model: Qwen/Qwen2-0.5B-Instruct
---
-### 当前模型的贡献者未提供更加详细的模型介绍。模型文件和权重,可浏览“模型文件”页面获取。
-#### 您可以通过如下git clone命令,或者ModelScope SDK来下载模型
-SDK下载
-```bash
-#安装ModelScope
-pip install modelscope
+# Qwen2-0.5B-Instruct-GGUF
+This is a quantized version of [Qwen/Qwen2-0.5B-Instruct](https://huggingface.co/Qwen/Qwen2-0.5B-Instruct) created using llama.cpp
+
+## Model Description
+
+Qwen2 is the new series of Qwen large language models. For Qwen2, we release a number of base language models and instruction-tuned language models ranging from 0.5 to 72 billion parameters, including a Mixture-of-Experts model. This repo contains the instruction-tuned 0.5B Qwen2 model.
+
+Compared with the state-of-the-art opensource language models, including the previous released Qwen1.5, Qwen2 has generally surpassed most opensource models and demonstrated competitiveness against proprietary models across a series of benchmarks targeting language understanding, language generation, multilingual capability, coding, mathematics, reasoning, etc.
+
+For more details, please refer to our [blog](https://qwenlm.github.io/blog/qwen2/), [GitHub](https://github.com/QwenLM/Qwen2), and [Documentation](https://qwen.readthedocs.io/en/latest/).
+
+
+## Model Details
+Qwen2 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, etc. Additionally, we have an improved tokenizer adaptive to multiple natural languages and codes.
+
+## Training details
+We pretrained the models with a large amount of data, and we post-trained the models with both supervised finetuning and direct preference optimization.
+
+
+## Requirements
+The code of Qwen2 has been in the latest Hugging Face Transformers and we advise you to install `transformers>=4.37.0`, or you might encounter the following error:
```
+KeyError: 'qwen2'
+```
+
+## Quickstart
+
+Below is a code snippet with `apply_chat_template` to show you how to load the tokenizer and model and how to generate contents.
+
```python
-#SDK模型下载
-from modelscope import snapshot_download
-model_dir = snapshot_download('QuantFactory/Qwen2-0.5B-Instruct-GGUF')
-```
-Git下载
-```
-#Git模型下载
-git clone https://www.modelscope.cn/QuantFactory/Qwen2-0.5B-Instruct-GGUF.git
+from transformers import AutoModelForCausalLM, AutoTokenizer
+device = "cuda" # the device to load the model onto
+
+model = AutoModelForCausalLM.from_pretrained(
+ "Qwen/Qwen2-0.5B-Instruct",
+ torch_dtype="auto",
+ device_map="auto"
+)
+tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-0.5B-Instruct")
+
+prompt = "Give me a short introduction to large language model."
+messages = [
+ {"role": "system", "content": "You are a helpful assistant."},
+ {"role": "user", "content": prompt}
+]
+text = tokenizer.apply_chat_template(
+ messages,
+ tokenize=False,
+ add_generation_prompt=True
+)
+model_inputs = tokenizer([text], return_tensors="pt").to(device)
+
+generated_ids = model.generate(
+ model_inputs.input_ids,
+ max_new_tokens=512
+)
+generated_ids = [
+ output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
+]
+
+response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
```
-
-如果您是本模型的贡献者,我们邀请您根据模型贡献文档,及时完善模型卡片内容。
\ No newline at end of file
+## Evaluation
+
+We briefly compare Qwen2-0.5B-Instruct with Qwen1.5-0.5B-Chat. The results are as follows:
+
+| Datasets | Qwen1.5-0.5B-Chat | **Qwen2-0.5B-Instruct** | Qwen1.5-1.8B-Chat | **Qwen2-1.5B-Instruct** |
+| :--- | :---: | :---: | :---: | :---: |
+| MMLU | 35.0 | **37.9** | 43.7 | **52.4** |
+| HumanEval | 9.1 | **17.1** | 25.0 | **37.8** |
+| GSM8K | 11.3 | **40.1** | 35.3 | **61.6** |
+| C-Eval | 37.2 | **45.2** | 55.3 | **63.8** |
+| IFEval (Prompt Strict-Acc.) | 14.6 | **20.0** | 16.8 | **29.0** |
+
+## Original Model Citation
+
+If you find our work helpful, feel free to give us a cite.
+
+```
+@article{qwen2,
+  title={Qwen2 Technical Report},
+  year={2024}
+}
+```
\ No newline at end of file
diff --git a/configuration.json b/configuration.json
new file mode 100644
index 0000000..bbeeda1
--- /dev/null
+++ b/configuration.json
@@ -0,0 +1 @@
+{"framework": "pytorch", "task": "text-generation", "allow_remote": true}
\ No newline at end of file