diff --git a/.gitattributes b/.gitattributes
index 11a9c76..32b3d7f 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1,39 +1,49 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
-*.bin.* filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
-*.zstandard filter=lfs diff=lfs merge=lfs -text
-*.tfevents* filter=lfs diff=lfs merge=lfs -text
-*.db* filter=lfs diff=lfs merge=lfs -text
-*.ark* filter=lfs diff=lfs merge=lfs -text
-**/*ckpt*data* filter=lfs diff=lfs merge=lfs -text
-**/*ckpt*.meta filter=lfs diff=lfs merge=lfs -text
-**/*ckpt*.index filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.gguf* filter=lfs diff=lfs merge=lfs -text
-*.ggml filter=lfs diff=lfs merge=lfs -text
-*.llamafile* filter=lfs diff=lfs merge=lfs -text
-*.pt2 filter=lfs diff=lfs merge=lfs -text
-*.gguf filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+gemma-2-27b-it-Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2-27b-it-Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2-27b-it-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2-27b-it-Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2-27b-it-Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2-27b-it-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2-27b-it-Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2-27b-it-Q5_0.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2-27b-it-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2-27b-it-Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2-27b-it-Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2-27b-it-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2-27b-it-f16-00001-of-00002.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2-27b-it-f16-00002-of-00002.gguf filter=lfs diff=lfs merge=lfs -text
diff --git a/README.md b/README.md
index b302e9d..b7be28b 100644
--- a/README.md
+++ b/README.md
@@ -1,51 +1,86 @@
---
-frameworks:
-- other
-license: Apache License 2.0
-tasks:
-- text-generation
-
-#model-type:
-## e.g. gpt, phi, llama, chatglm, baichuan, etc.
-#- gpt
-
-#domain:
-## e.g. nlp, cv, audio, multi-modal
-#- nlp
-
-#language:
-## Language code list: https://help.aliyun.com/document_detail/215387.html?spm=a2c4g.11186623.0.0.9f8d7467kni6Aa
-#- cn
-
-#metrics:
-## e.g. CIDEr, BLEU, ROUGE, etc.
-#- CIDEr
-
-#tags:
-## Custom tags for training methods and more, e.g. pretrained, fine-tuned, instruction-tuned, RL-tuned
-#- pretrained
-
-#tools:
-## e.g. vllm, fastchat, llamacpp, AdaSeq, etc.
-#- vllm
+base_model: google/gemma-2-27b-it
+inference: false
+license: gemma
+library_name: transformers
+pipeline_tag: text-generation
+model_creator: Google
+model_name: gemma-2-27b-it
+quantized_by: Second State Inc.
+tags:
+- conversational
---
-### The contributors of this model have not provided a more detailed introduction. Model files and weights can be found on the "Model Files" page.
-#### You can download the model with the git clone command below, or via the ModelScope SDK
-SDK download
-```bash
-# Install ModelScope
-pip install modelscope
-```
-```python
-# Download the model via the SDK
-from modelscope import snapshot_download
-model_dir = snapshot_download('second-state/gemma-2-27b-it-GGUF')
-```
-Git download
-```
-# Download the model via git
-git clone https://www.modelscope.cn/second-state/gemma-2-27b-it-GGUF.git
-```
+
-If you are a contributor to this model, we invite you to complete the model card according to the model contribution documentation.
\ No newline at end of file
+# Gemma-2-27b-it-GGUF
+
+## Original Model
+
+[google/gemma-2-27b-it](https://huggingface.co/google/gemma-2-27b-it)
+
+## Run with LlamaEdge
+
+- LlamaEdge version: [v0.12.1](https://github.com/LlamaEdge/LlamaEdge/releases/tag/0.12.1) and above
+
+- Prompt template
+
+ - Prompt type: `gemma-instruct`
+
+ - Prompt string
+
+ ```text
+  <start_of_turn>user
+  {user_message}<end_of_turn>
+  <start_of_turn>model
+  {model_message}<end_of_turn>model
+ ```
+
+- Context size: `8192`
+
+- Run as LlamaEdge service
+
+ ```bash
+ wasmedge --dir .:. --nn-preload default:GGML:AUTO:gemma-2-27b-it-Q5_K_M.gguf \
+ llama-api-server.wasm \
+ --prompt-template gemma-instruct \
+ --ctx-size 8192 \
+ --model-name gemma-2-27b
+ ```
+
+- Run as LlamaEdge command app
+
+ ```bash
+ wasmedge --dir .:. \
+ --nn-preload default:GGML:AUTO:gemma-2-27b-it-Q5_K_M.gguf \
+ llama-chat.wasm \
+ --prompt-template gemma-instruct \
+ --ctx-size 8192
+ ```
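+
+- Test the running service
+
+  The API server exposes an OpenAI-compatible interface. A minimal sketch of a chat request, assuming the server listens on LlamaEdge's default address `localhost:8080` (adjust host and port to your setup); the model name matches the `--model-name gemma-2-27b` flag above:
+
+  ```bash
+  curl -X POST http://localhost:8080/v1/chat/completions \
+    -H 'Content-Type: application/json' \
+    -d '{"model": "gemma-2-27b", "messages": [{"role": "user", "content": "What is the capital of France?"}]}'
+  ```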
+
+## Quantized GGUF Models
+
+| Name | Quant method | Bits | Size | Use case |
+| ---- | ---- | ---- | ---- | ----- |
+| [gemma-2-27b-it-Q2_K.gguf](https://huggingface.co/second-state/gemma-2-27b-it-GGUF/blob/main/gemma-2-27b-it-Q2_K.gguf) | Q2_K | 2 | 10.4 GB| smallest, significant quality loss - not recommended for most purposes |
+| [gemma-2-27b-it-Q3_K_L.gguf](https://huggingface.co/second-state/gemma-2-27b-it-GGUF/blob/main/gemma-2-27b-it-Q3_K_L.gguf) | Q3_K_L | 3 | 14.5 GB| small, substantial quality loss |
+| [gemma-2-27b-it-Q3_K_M.gguf](https://huggingface.co/second-state/gemma-2-27b-it-GGUF/blob/main/gemma-2-27b-it-Q3_K_M.gguf) | Q3_K_M | 3 | 13.4 GB| very small, high quality loss |
+| [gemma-2-27b-it-Q3_K_S.gguf](https://huggingface.co/second-state/gemma-2-27b-it-GGUF/blob/main/gemma-2-27b-it-Q3_K_S.gguf) | Q3_K_S | 3 | 12.2 GB| very small, high quality loss |
+| [gemma-2-27b-it-Q4_0.gguf](https://huggingface.co/second-state/gemma-2-27b-it-GGUF/blob/main/gemma-2-27b-it-Q4_0.gguf) | Q4_0 | 4 | 15.6 GB| legacy; small, very high quality loss - prefer using Q3_K_M |
+| [gemma-2-27b-it-Q4_K_M.gguf](https://huggingface.co/second-state/gemma-2-27b-it-GGUF/blob/main/gemma-2-27b-it-Q4_K_M.gguf) | Q4_K_M | 4 | 16.6 GB| medium, balanced quality - recommended |
+| [gemma-2-27b-it-Q4_K_S.gguf](https://huggingface.co/second-state/gemma-2-27b-it-GGUF/blob/main/gemma-2-27b-it-Q4_K_S.gguf) | Q4_K_S | 4 | 15.7 GB| small, greater quality loss |
+| [gemma-2-27b-it-Q5_0.gguf](https://huggingface.co/second-state/gemma-2-27b-it-GGUF/blob/main/gemma-2-27b-it-Q5_0.gguf) | Q5_0 | 5 | 18.9 GB| legacy; medium, balanced quality - prefer using Q4_K_M |
+| [gemma-2-27b-it-Q5_K_M.gguf](https://huggingface.co/second-state/gemma-2-27b-it-GGUF/blob/main/gemma-2-27b-it-Q5_K_M.gguf) | Q5_K_M | 5 | 19.4 GB| large, very low quality loss - recommended |
+| [gemma-2-27b-it-Q5_K_S.gguf](https://huggingface.co/second-state/gemma-2-27b-it-GGUF/blob/main/gemma-2-27b-it-Q5_K_S.gguf) | Q5_K_S | 5 | 18.9 GB| large, low quality loss - recommended |
+| [gemma-2-27b-it-Q6_K.gguf](https://huggingface.co/second-state/gemma-2-27b-it-GGUF/blob/main/gemma-2-27b-it-Q6_K.gguf) | Q6_K | 6 | 22.3 GB| very large, extremely low quality loss |
+| [gemma-2-27b-it-Q8_0.gguf](https://huggingface.co/second-state/gemma-2-27b-it-GGUF/blob/main/gemma-2-27b-it-Q8_0.gguf) | Q8_0 | 8 | 28.9 GB| very large, extremely low quality loss - not recommended |
+| [gemma-2-27b-it-f16-00001-of-00002.gguf](https://huggingface.co/second-state/gemma-2-27b-it-GGUF/blob/main/gemma-2-27b-it-f16-00001-of-00002.gguf) | f16 | 16 | 29.9 GB| |
+| [gemma-2-27b-it-f16-00002-of-00002.gguf](https://huggingface.co/second-state/gemma-2-27b-it-GGUF/blob/main/gemma-2-27b-it-f16-00002-of-00002.gguf) | f16 | 16 | 24.6 GB| |
+
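+Each file above can also be fetched individually. A minimal sketch using the `huggingface-cli` tool (an assumption; any HTTP client pointed at the links in the table works just as well):
+
+```bash
+# Requires the Hugging Face CLI: pip install -U "huggingface_hub[cli]"
+huggingface-cli download second-state/gemma-2-27b-it-GGUF \
+  gemma-2-27b-it-Q5_K_M.gguf --local-dir .
+```
+
+Note that the two f16 shards belong together: keep both in the same directory and point the runtime at the first shard (`gemma-2-27b-it-f16-00001-of-00002.gguf`), which is how llama.cpp-based runtimes load models split with `gguf-split`.
+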
+*Quantized with llama.cpp b3259*
\ No newline at end of file
diff --git a/config.json b/config.json
new file mode 100644
index 0000000..2f1eb57
--- /dev/null
+++ b/config.json
@@ -0,0 +1,33 @@
+{
+ "architectures": [
+ "Gemma2ForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "attn_logit_softcapping": 50.0,
+ "bos_token_id": 2,
+ "cache_implementation": "hybrid",
+ "eos_token_id": 1,
+ "final_logit_softcapping": 30.0,
+ "head_dim": 128,
+ "hidden_act": "gelu_pytorch_tanh",
+ "hidden_activation": "gelu_pytorch_tanh",
+ "hidden_size": 4608,
+ "initializer_range": 0.02,
+ "intermediate_size": 36864,
+ "max_position_embeddings": 8192,
+ "model_type": "gemma2",
+ "num_attention_heads": 32,
+ "num_hidden_layers": 46,
+ "num_key_value_heads": 16,
+ "pad_token_id": 0,
+ "query_pre_attn_scalar": 144,
+ "rms_norm_eps": 1e-06,
+ "rope_theta": 10000.0,
+ "sliding_window": 4096,
+ "sliding_window_size": 4096,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.42.0.dev0",
+ "use_cache": true,
+ "vocab_size": 256000
+}
diff --git a/gemma-2-27b-it-Q2_K.gguf b/gemma-2-27b-it-Q2_K.gguf
new file mode 100644
index 0000000..395258f
--- /dev/null
+++ b/gemma-2-27b-it-Q2_K.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:74573055f1f2083cd18c29c83766b6cc216b75a35e8236c6fec27ebe7ad2514b
+size 10449576032
diff --git a/gemma-2-27b-it-Q3_K_L.gguf b/gemma-2-27b-it-Q3_K_L.gguf
new file mode 100644
index 0000000..df2b4d7
--- /dev/null
+++ b/gemma-2-27b-it-Q3_K_L.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d476534b00c3f09ff5c11ccffad7789b216176fbc99d486a3a151d16473b3f8a
+size 14519361632
diff --git a/gemma-2-27b-it-Q3_K_M.gguf b/gemma-2-27b-it-Q3_K_M.gguf
new file mode 100644
index 0000000..45937ef
--- /dev/null
+++ b/gemma-2-27b-it-Q3_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:92e7ce3e26137e2d9559cee01229226852655be01229d967f4d8496bca5cef0f
+size 13424648288
diff --git a/gemma-2-27b-it-Q3_K_S.gguf b/gemma-2-27b-it-Q3_K_S.gguf
new file mode 100644
index 0000000..e7cfd8f
--- /dev/null
+++ b/gemma-2-27b-it-Q3_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a2c86da06c74bbeed911ad3cd4d876ed21ee9618ef3af41f0e8aee42dfab1343
+size 12169060448
diff --git a/gemma-2-27b-it-Q4_K_M.gguf b/gemma-2-27b-it-Q4_K_M.gguf
new file mode 100644
index 0000000..2417471
--- /dev/null
+++ b/gemma-2-27b-it-Q4_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fb052c865ed92957c7a6820d028ce39297f33d9a86c77fed7c7cd463785cb95b
+size 16645382240
diff --git a/gemma-2-27b-it-Q4_K_S.gguf b/gemma-2-27b-it-Q4_K_S.gguf
new file mode 100644
index 0000000..dfd8e28
--- /dev/null
+++ b/gemma-2-27b-it-Q4_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef65ae05888d5ea3fc0b44ca5f34c14b7fce87e561268f3aae231a9210a6fe4f
+size 15739265120
diff --git a/gemma-2-27b-it-Q5_0.gguf b/gemma-2-27b-it-Q5_0.gguf
new file mode 100644
index 0000000..3096092
--- /dev/null
+++ b/gemma-2-27b-it-Q5_0.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6cac4390d785c7113b1e89fd0d20b0799d97771068ec22e0f8051baa574d2c6d
+size 18884206688
diff --git a/gemma-2-27b-it-Q5_K_M.gguf b/gemma-2-27b-it-Q5_K_M.gguf
new file mode 100644
index 0000000..25b90cc
--- /dev/null
+++ b/gemma-2-27b-it-Q5_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d9240c09c544a518ea51b4358dd7b91ac6deda22deeacf2ebcf0b4b95fa5ec04
+size 19408117856
diff --git a/gemma-2-27b-it-Q5_K_S.gguf b/gemma-2-27b-it-Q5_K_S.gguf
new file mode 100644
index 0000000..6c74449
--- /dev/null
+++ b/gemma-2-27b-it-Q5_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:925151ff3173773fe1ea8152c58d38f1a4ea10b10afbff9e02d01ac9bd4d3863
+size 18884206688
diff --git a/gemma-2-27b-it-Q6_K.gguf b/gemma-2-27b-it-Q6_K.gguf
new file mode 100644
index 0000000..9824c08
--- /dev/null
+++ b/gemma-2-27b-it-Q6_K.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:91efe1ff49f437fa493da1a7712cf9a9a7dacc8157f12b380d87806fb4dbdb90
+size 22343524448
diff --git a/gemma-2-27b-it-f16-00001-of-00002.gguf b/gemma-2-27b-it-f16-00001-of-00002.gguf
new file mode 100644
index 0000000..6237c7d
--- /dev/null
+++ b/gemma-2-27b-it-f16-00001-of-00002.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4ab4bffc57f5fb61e4cd11cd087aa4fe55aac097a976c7043edb5ad824a48202
+size 29885958144
diff --git a/gemma-2-27b-it-f16-00002-of-00002.gguf b/gemma-2-27b-it-f16-00002-of-00002.gguf
new file mode 100644
index 0000000..c5f2298
--- /dev/null
+++ b/gemma-2-27b-it-f16-00002-of-00002.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a3990b5bdf739ed56f245a3e4de261b4a50e551355173fc7c80de93d43041386
+size 24576063776