diff --git a/.gitattributes b/.gitattributes
index 11a9c76..a69016b 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1,39 +1,48 @@
 *.7z filter=lfs diff=lfs merge=lfs -text
 *.arrow filter=lfs diff=lfs merge=lfs -text
 *.bin filter=lfs diff=lfs merge=lfs -text
-*.bin.* filter=lfs diff=lfs merge=lfs -text
 *.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
 *.ftz filter=lfs diff=lfs merge=lfs -text
 *.gz filter=lfs diff=lfs merge=lfs -text
 *.h5 filter=lfs diff=lfs merge=lfs -text
 *.joblib filter=lfs diff=lfs merge=lfs -text
 *.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
 *.model filter=lfs diff=lfs merge=lfs -text
 *.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
 *.onnx filter=lfs diff=lfs merge=lfs -text
 *.ot filter=lfs diff=lfs merge=lfs -text
 *.parquet filter=lfs diff=lfs merge=lfs -text
 *.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
 *.pt filter=lfs diff=lfs merge=lfs -text
 *.pth filter=lfs diff=lfs merge=lfs -text
 *.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
 saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
 *.tflite filter=lfs diff=lfs merge=lfs -text
 *.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
 *.xz filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
-*.zstandard filter=lfs diff=lfs merge=lfs -text
-*.tfevents* filter=lfs diff=lfs merge=lfs -text
-*.db* filter=lfs diff=lfs merge=lfs -text
-*.ark* filter=lfs diff=lfs merge=lfs -text
-**/*ckpt*data* filter=lfs diff=lfs merge=lfs -text
-**/*ckpt*.meta filter=lfs diff=lfs merge=lfs -text
-**/*ckpt*.index filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.gguf* filter=lfs diff=lfs merge=lfs -text
-*.ggml filter=lfs diff=lfs merge=lfs -text
-*.llamafile* filter=lfs diff=lfs merge=lfs -text
-*.pt2 filter=lfs diff=lfs merge=lfs -text
-*.gguf filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+Llama-3.2-1B-Instruct-Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
+Llama-3.2-1B-Instruct-Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+Llama-3.2-1B-Instruct-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+Llama-3.2-1B-Instruct-Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+Llama-3.2-1B-Instruct-Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
+Llama-3.2-1B-Instruct-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+Llama-3.2-1B-Instruct-Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+Llama-3.2-1B-Instruct-Q5_0.gguf filter=lfs diff=lfs merge=lfs -text
+Llama-3.2-1B-Instruct-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+Llama-3.2-1B-Instruct-Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+Llama-3.2-1B-Instruct-Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
+Llama-3.2-1B-Instruct-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
+Llama-3.2-1B-Instruct-f16.gguf filter=lfs diff=lfs merge=lfs -text
diff --git a/Llama-3.2-1B-Instruct-Q2_K.gguf b/Llama-3.2-1B-Instruct-Q2_K.gguf
new file mode 100644
index 0000000..48146d6
--- /dev/null
+++ b/Llama-3.2-1B-Instruct-Q2_K.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7cbccffcba1e679d2fb906a55b5d6d641aa21bff9ee20fe30cef6dd6b9f8e410
+size 580874080
diff --git a/Llama-3.2-1B-Instruct-Q3_K_L.gguf b/Llama-3.2-1B-Instruct-Q3_K_L.gguf
new file mode 100644
index 0000000..8ed6e18
--- /dev/null
+++ b/Llama-3.2-1B-Instruct-Q3_K_L.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e89a46e80c150f92bc0d5dfacb069f46770807664340339a22d886031ee06ddc
+size 732524384
diff --git a/Llama-3.2-1B-Instruct-Q3_K_M.gguf b/Llama-3.2-1B-Instruct-Q3_K_M.gguf
new file mode 100644
index 0000000..9ed912c
--- /dev/null
+++ b/Llama-3.2-1B-Instruct-Q3_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9bdd2550b78bdee5653da8f9d07d74faaafd7cc50602eac9107d8f2c60677d24
+size 690843488
diff --git a/Llama-3.2-1B-Instruct-Q3_K_S.gguf b/Llama-3.2-1B-Instruct-Q3_K_S.gguf
new file mode 100644
index 0000000..dd8a14d
--- /dev/null
+++ b/Llama-3.2-1B-Instruct-Q3_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bf5363d4847936b37950b1d048199b616b80edd40e78ed15e7b0f2e36c32433b
+size 641691488
diff --git a/Llama-3.2-1B-Instruct-Q4_K_M.gguf b/Llama-3.2-1B-Instruct-Q4_K_M.gguf
new file mode 100644
index 0000000..eb8accb
--- /dev/null
+++ b/Llama-3.2-1B-Instruct-Q4_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e95df2f5144fd67bf11421ad59812fbde45a79c90eb49cb5d79e3d7bddaa1331
+size 807694176
diff --git a/Llama-3.2-1B-Instruct-Q4_K_S.gguf b/Llama-3.2-1B-Instruct-Q4_K_S.gguf
new file mode 100644
index 0000000..c63fc51
--- /dev/null
+++ b/Llama-3.2-1B-Instruct-Q4_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e256d51eed0799552f91a4fb8f0d05315b3e3dc1cd4499514e7d87a845951c88
+size 775647072
diff --git a/Llama-3.2-1B-Instruct-Q5_0.gguf b/Llama-3.2-1B-Instruct-Q5_0.gguf
new file mode 100644
index 0000000..035e17f
--- /dev/null
+++ b/Llama-3.2-1B-Instruct-Q5_0.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:922306ce6b89b731d20130c2a5e909be530c5703fd24b26f7d050bad3faf3fc5
+size 892563296
diff --git a/Llama-3.2-1B-Instruct-Q5_K_M.gguf b/Llama-3.2-1B-Instruct-Q5_K_M.gguf
new file mode 100644
index 0000000..f3d855b
--- /dev/null
+++ b/Llama-3.2-1B-Instruct-Q5_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7cd16584b15af8c268ca60454b63b5021b0b2c7edf66cff2b41210a333b2233d
+size 911503200
diff --git a/Llama-3.2-1B-Instruct-Q5_K_S.gguf b/Llama-3.2-1B-Instruct-Q5_K_S.gguf
new file mode 100644
index 0000000..49de768
--- /dev/null
+++ b/Llama-3.2-1B-Instruct-Q5_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:66260aca245c04327590457d52debf378e0fc93b881b2effb71f9a00703a1953
+size 892563296
diff --git a/Llama-3.2-1B-Instruct-Q6_K.gguf b/Llama-3.2-1B-Instruct-Q6_K.gguf
new file mode 100644
index 0000000..beb192e
--- /dev/null
+++ b/Llama-3.2-1B-Instruct-Q6_K.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3fe00ecc20650188a714bf1766cecbd938e6119e9311c8fe91ca3fb5f3246591
+size 1021800288
diff --git a/Llama-3.2-1B-Instruct-f16.gguf b/Llama-3.2-1B-Instruct-f16.gguf
new file mode 100644
index 0000000..75e2bb8
--- /dev/null
+++ b/Llama-3.2-1B-Instruct-f16.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1f33ad43d2b85b908ff06fe7002b69806a57359b9b2617ca27d7bdea428ae146
+size 2479595360
diff --git a/README.md b/README.md
index 0589e3a..e6f0488 100644
--- a/README.md
+++ b/README.md
@@ -1,51 +1,99 @@
----
-frameworks:
-- other
-license: Apache License 2.0
-tasks:
-- text-generation
-
-#model-type:
-## e.g. gpt, phi, llama, chatglm, baichuan, etc.
-#- gpt
-
-#domain:
-## e.g. nlp, cv, audio, multi-modal
-#- nlp
-
-#language:
-## language code list: https://help.aliyun.com/document_detail/215387.html?spm=a2c4g.11186623.0.0.9f8d7467kni6Aa
-#- cn
-
-#metrics:
-## e.g. CIDEr, BLEU, ROUGE, etc.
-#- CIDEr
-
-#tags:
-## custom tags, including pretrained, fine-tuned, instruction-tuned, RL-tuned and other training methods
-#- pretrained
-
-#tools:
-## e.g. vllm, fastchat, llamacpp, AdaSeq, etc.
-#- vllm
----
-### The contributor of this model has not provided a more detailed model introduction. The model files and weights are available on the "Model Files" page.
-#### You can download the model with the git clone command below, or via the ModelScope SDK
-
-SDK download
-```bash
-#Install ModelScope
-pip install modelscope
-```
-```python
-#Download the model via the SDK
-from modelscope import snapshot_download
-model_dir = snapshot_download('second-state/Llama-3.2-1B-Instruct-GGUF')
-```
-Git download
-```
-#Download the model via git
-git clone https://www.modelscope.cn/second-state/Llama-3.2-1B-Instruct-GGUF.git
-```
-
-

-If you are the contributor of this model, we invite you to complete the model card content according to the model contribution documentation.

\ No newline at end of file
+---
+base_model: meta-llama/Llama-3.2-1B-Instruct
+license: llama3.2
+model_creator: meta
+model_name: Llama-3.2-1B-Instruct
+quantized_by: Second State Inc.
+language:
+- en
+- de
+- fr
+- it
+- pt
+- hi
+- es
+- th
+library_name: transformers
+pipeline_tag: text-generation
+tags:
+- chat
+- llama
+- llama-3
+---
+
# Llama-3.2-1B-Instruct-GGUF

## Original Model

[meta-llama/Llama-3.2-1B-Instruct](https://huggingface.co/meta-llama/Llama-3.2-1B-Instruct)

## Run with LlamaEdge

- LlamaEdge version: [v0.14.5](https://github.com/LlamaEdge/LlamaEdge/releases/tag/0.14.5) and above

- Prompt template

  - Prompt type: `llama-3-chat`

  - Prompt string

    ```text
    <|begin_of_text|><|start_header_id|>system<|end_header_id|>

    {{ system_prompt }}<|eot_id|><|start_header_id|>user<|end_header_id|>

    {{ user_message_1 }}<|eot_id|><|start_header_id|>assistant<|end_header_id|>

    {{ model_answer_1 }}<|eot_id|><|start_header_id|>user<|end_header_id|>

    {{ user_message_2 }}<|eot_id|><|start_header_id|>assistant<|end_header_id|>
    ```

- Context size: `128000`

- Run as LlamaEdge service

  ```bash
  wasmedge --dir .:. --nn-preload default:GGML:AUTO:Llama-3.2-1B-Instruct-Q5_K_M.gguf \
    llama-api-server.wasm \
    --prompt-template llama-3-chat \
    --ctx-size 128000 \
    --model-name Llama-3.2-1b
  ```

- Run as LlamaEdge command app

  ```bash
  wasmedge --dir .:. --nn-preload default:GGML:AUTO:Llama-3.2-1B-Instruct-Q5_K_M.gguf \
    llama-chat.wasm \
    --prompt-template llama-3-chat \
    --ctx-size 128000
  ```

## Quantized GGUF Models

| Name | Quant method | Bits | Size | Use case |
| ---- | ---- | ---- | ---- | ----- |
| [Llama-3.2-1B-Instruct-Q2_K.gguf](https://huggingface.co/second-state/Llama-3.2-1B-Instruct-GGUF/blob/main/Llama-3.2-1B-Instruct-Q2_K.gguf) | Q2_K | 2 | 581 MB| smallest, significant quality loss - not recommended for most purposes |
| [Llama-3.2-1B-Instruct-Q3_K_L.gguf](https://huggingface.co/second-state/Llama-3.2-1B-Instruct-GGUF/blob/main/Llama-3.2-1B-Instruct-Q3_K_L.gguf) | Q3_K_L | 3 | 733 MB| small, substantial quality loss |
| [Llama-3.2-1B-Instruct-Q3_K_M.gguf](https://huggingface.co/second-state/Llama-3.2-1B-Instruct-GGUF/blob/main/Llama-3.2-1B-Instruct-Q3_K_M.gguf) | Q3_K_M | 3 | 691 MB| very small, high quality loss |
| [Llama-3.2-1B-Instruct-Q3_K_S.gguf](https://huggingface.co/second-state/Llama-3.2-1B-Instruct-GGUF/blob/main/Llama-3.2-1B-Instruct-Q3_K_S.gguf) | Q3_K_S | 3 | 642 MB| very small, high quality loss |
| [Llama-3.2-1B-Instruct-Q4_0.gguf](https://huggingface.co/second-state/Llama-3.2-1B-Instruct-GGUF/blob/main/Llama-3.2-1B-Instruct-Q4_0.gguf) | Q4_0 | 4 | 771 MB| legacy; small, very high quality loss - prefer using Q3_K_M |
| [Llama-3.2-1B-Instruct-Q4_K_M.gguf](https://huggingface.co/second-state/Llama-3.2-1B-Instruct-GGUF/blob/main/Llama-3.2-1B-Instruct-Q4_K_M.gguf) | Q4_K_M | 4 | 808 MB| medium, balanced quality - recommended |
| [Llama-3.2-1B-Instruct-Q4_K_S.gguf](https://huggingface.co/second-state/Llama-3.2-1B-Instruct-GGUF/blob/main/Llama-3.2-1B-Instruct-Q4_K_S.gguf) | Q4_K_S | 4 | 776 MB| small, greater quality loss |
| [Llama-3.2-1B-Instruct-Q5_0.gguf](https://huggingface.co/second-state/Llama-3.2-1B-Instruct-GGUF/blob/main/Llama-3.2-1B-Instruct-Q5_0.gguf) | Q5_0 | 5 | 893 MB| legacy; medium, balanced quality - prefer using Q4_K_M |
| [Llama-3.2-1B-Instruct-Q5_K_M.gguf](https://huggingface.co/second-state/Llama-3.2-1B-Instruct-GGUF/blob/main/Llama-3.2-1B-Instruct-Q5_K_M.gguf) | Q5_K_M | 5 | 912 MB| large, very low quality loss - recommended |
| [Llama-3.2-1B-Instruct-Q5_K_S.gguf](https://huggingface.co/second-state/Llama-3.2-1B-Instruct-GGUF/blob/main/Llama-3.2-1B-Instruct-Q5_K_S.gguf) | Q5_K_S | 5 | 893 MB| large, low quality loss - recommended |
| [Llama-3.2-1B-Instruct-Q6_K.gguf](https://huggingface.co/second-state/Llama-3.2-1B-Instruct-GGUF/blob/main/Llama-3.2-1B-Instruct-Q6_K.gguf) | Q6_K | 6 | 1.02 GB| very large, extremely low quality loss |
| [Llama-3.2-1B-Instruct-Q8_0.gguf](https://huggingface.co/second-state/Llama-3.2-1B-Instruct-GGUF/blob/main/Llama-3.2-1B-Instruct-Q8_0.gguf) | Q8_0 | 8 | 1.32 GB| very large, extremely low quality loss - not recommended |
| [Llama-3.2-1B-Instruct-f16.gguf](https://huggingface.co/second-state/Llama-3.2-1B-Instruct-GGUF/blob/main/Llama-3.2-1B-Instruct-f16.gguf) | f16 | 16 | 2.48 GB| |

*Quantized with llama.cpp b3807*

diff --git a/config.json b/config.json
new file mode 100644
index 0000000..3e3aaf5
--- /dev/null
+++ b/config.json
@@ -0,0 +1,39 @@
+{
+  "architectures": [
+    "LlamaForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "bos_token_id": 128000,
+  "eos_token_id": [
+    128001,
+    128008,
+    128009
+  ],
+  "head_dim": 64,
+  "hidden_act": "silu",
+  "hidden_size": 2048,
+  "initializer_range": 0.02,
+  "intermediate_size": 8192,
+  "max_position_embeddings": 131072,
+  "mlp_bias": false,
+  "model_type": "llama",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 16,
+  "num_key_value_heads": 8,
+  "pretraining_tp": 1,
+  "rms_norm_eps": 1e-05,
+  "rope_scaling": {
+    "factor": 32.0,
+    "high_freq_factor": 4.0,
+    "low_freq_factor": 1.0,
+    "original_max_position_embeddings": 8192,
+    "rope_type": "llama3"
+  },
+  "rope_theta": 500000.0,
+  "tie_word_embeddings": true,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.45.0.dev0",
+  "use_cache": true,
+  "vocab_size": 128256
+}
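Once the `llama-api-server.wasm` command from the README is running, the service can be exercised with an OpenAI-style chat request. This is a minimal sketch, assuming LlamaEdge's default listen address of `localhost:8080` and the `/v1/chat/completions` route; the `model` field matches the `--model-name` value passed above. Adjust the address if the server was started on a different socket.

```bash
# Hypothetical smoke test against the running LlamaEdge API server.
# Assumes the default localhost:8080 listen address (adjust if needed).
curl -s http://localhost:8080/v1/chat/completions \
  -H 'Content-Type: application/json' \
  -d '{
    "model": "Llama-3.2-1b",
    "messages": [
      {"role": "system", "content": "You are a helpful assistant."},
      {"role": "user", "content": "What is the capital of France?"}
    ]
  }'
```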
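The Git LFS pointer files in the diff above record the SHA-256 digest (`oid`) and byte size of each GGUF file, so a download can be checked locally before use. A minimal sketch for the Q5_K_M file, assuming a POSIX shell with `sha256sum` available; the expected values are copied from the pointer shown earlier.

```bash
# Verify a downloaded GGUF against the oid/size recorded in its Git LFS pointer.
# Expected values come from the Llama-3.2-1B-Instruct-Q5_K_M.gguf pointer above.
file="Llama-3.2-1B-Instruct-Q5_K_M.gguf"
expected_oid="7cd16584b15af8c268ca60454b63b5021b0b2c7edf66cff2b41210a333b2233d"
expected_size=911503200

actual_oid=$(sha256sum "$file" | awk '{print $1}')
actual_size=$(wc -c < "$file")

if [ "$actual_oid" = "$expected_oid" ] && [ "$actual_size" -eq "$expected_size" ]; then
  echo "OK: $file matches its LFS pointer"
else
  echo "MISMATCH: $file (oid=$actual_oid, size=$actual_size)"
fi
```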