diff --git a/.gitattributes b/.gitattributes
index 886ac0c..b7722c6 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1,38 +1,48 @@
 *.7z filter=lfs diff=lfs merge=lfs -text
 *.arrow filter=lfs diff=lfs merge=lfs -text
 *.bin filter=lfs diff=lfs merge=lfs -text
-*.bin.* filter=lfs diff=lfs merge=lfs -text
 *.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
 *.ftz filter=lfs diff=lfs merge=lfs -text
 *.gz filter=lfs diff=lfs merge=lfs -text
 *.h5 filter=lfs diff=lfs merge=lfs -text
 *.joblib filter=lfs diff=lfs merge=lfs -text
 *.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
 *.model filter=lfs diff=lfs merge=lfs -text
 *.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
 *.onnx filter=lfs diff=lfs merge=lfs -text
 *.ot filter=lfs diff=lfs merge=lfs -text
 *.parquet filter=lfs diff=lfs merge=lfs -text
 *.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
 *.pt filter=lfs diff=lfs merge=lfs -text
 *.pth filter=lfs diff=lfs merge=lfs -text
 *.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
 saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
 *.tflite filter=lfs diff=lfs merge=lfs -text
 *.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
 *.xz filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
-*.zstandard filter=lfs diff=lfs merge=lfs -text
-*.tfevents* filter=lfs diff=lfs merge=lfs -text
-*.db* filter=lfs diff=lfs merge=lfs -text
-*.ark* filter=lfs diff=lfs merge=lfs -text
-**/*ckpt*data* filter=lfs diff=lfs merge=lfs -text
-**/*ckpt*.meta filter=lfs diff=lfs merge=lfs -text
-**/*ckpt*.index filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.gguf* filter=lfs diff=lfs merge=lfs -text
-*.ggml filter=lfs diff=lfs merge=lfs -text
-*.llamafile* filter=lfs diff=lfs merge=lfs -text
-*.pt2 filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+Meta-Llama-3.1-8B-Instruct-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+Meta-Llama-3.1-8B-Instruct-Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
+Meta-Llama-3.1-8B-Instruct-Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+Meta-Llama-3.1-8B-Instruct-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+Meta-Llama-3.1-8B-Instruct-Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+Meta-Llama-3.1-8B-Instruct-Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
+Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+Meta-Llama-3.1-8B-Instruct-Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+Meta-Llama-3.1-8B-Instruct-Q5_0.gguf filter=lfs diff=lfs merge=lfs -text
+Meta-Llama-3.1-8B-Instruct-Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+Meta-Llama-3.1-8B-Instruct-Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
+Meta-Llama-3.1-8B-Instruct-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
+Meta-Llama-3.1-8B-Instruct-f16.gguf filter=lfs diff=lfs merge=lfs -text
diff --git a/Meta-Llama-3.1-8B-Instruct-Q2_K.gguf b/Meta-Llama-3.1-8B-Instruct-Q2_K.gguf
new file mode 100644
index 0000000..ab4c41a
--- /dev/null
+++ b/Meta-Llama-3.1-8B-Instruct-Q2_K.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:978c23cad76854c8bf944a819159697215575a72a8d70910b41e9effbd7badfb
+size 3179131552
diff --git a/Meta-Llama-3.1-8B-Instruct-Q3_K_L.gguf b/Meta-Llama-3.1-8B-Instruct-Q3_K_L.gguf
new file mode 100644
index 0000000..d59fa31
--- /dev/null
+++ b/Meta-Llama-3.1-8B-Instruct-Q3_K_L.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:46bb86256326b4fea38c4adf8bd70b4e0ad5551db43ef9fe0e5489411ca0dc1f
+size 4321956512
diff --git a/Meta-Llama-3.1-8B-Instruct-Q3_K_M.gguf b/Meta-Llama-3.1-8B-Instruct-Q3_K_M.gguf
new file mode 100644
index 0000000..7c9f1d2
--- /dev/null
+++ b/Meta-Llama-3.1-8B-Instruct-Q3_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1577fefd7a7d843f89b25d32849e3fa2e19a5373e620bce504d2dbc57b73f1cf
+size 4018918048
diff --git a/Meta-Llama-3.1-8B-Instruct-Q3_K_S.gguf b/Meta-Llama-3.1-8B-Instruct-Q3_K_S.gguf
new file mode 100644
index 0000000..f553f2a
--- /dev/null
+++ b/Meta-Llama-3.1-8B-Instruct-Q3_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b74d716226d30c2516d26bbd4cff01f951433bfe717e1ef4b61899fbaf7a8152
+size 3664499360
diff --git a/Meta-Llama-3.1-8B-Instruct-Q4_0.gguf b/Meta-Llama-3.1-8B-Instruct-Q4_0.gguf
new file mode 100644
index 0000000..5b6ba96
--- /dev/null
+++ b/Meta-Llama-3.1-8B-Instruct-Q4_0.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:323f9cc1282b3d5bb817a0af2f3cfd0a6a68496d02ae6d420f5b14ed688ceb3f
+size 4661211808
diff --git a/Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf b/Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf
new file mode 100644
index 0000000..e217dc7
--- /dev/null
+++ b/Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:415df20ac06158f3b473bf6ea3aa666e052ed55e6e6ff6b0dbb9db7af4180d36
+size 4920734368
diff --git a/Meta-Llama-3.1-8B-Instruct-Q4_K_S.gguf b/Meta-Llama-3.1-8B-Instruct-Q4_K_S.gguf
new file mode 100644
index 0000000..aa0fc38
--- /dev/null
+++ b/Meta-Llama-3.1-8B-Instruct-Q4_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:33a8ba1673c3e6fdca8397a8abd35af58cde851e471c30fc4a621f80274f3f92
+size 4692669088
diff --git a/Meta-Llama-3.1-8B-Instruct-Q5_0.gguf b/Meta-Llama-3.1-8B-Instruct-Q5_0.gguf
new file mode 100644
index 0000000..0100c82
--- /dev/null
+++ b/Meta-Llama-3.1-8B-Instruct-Q5_0.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5b053deabe91246648fabf95b972d6a64e4077e432a288a50629aefe2fcfa719
+size 5599294112
diff --git a/Meta-Llama-3.1-8B-Instruct-Q5_K_M.gguf b/Meta-Llama-3.1-8B-Instruct-Q5_K_M.gguf
new file mode 100644
index 0000000..3a9b901
--- /dev/null
+++ b/Meta-Llama-3.1-8B-Instruct-Q5_K_M.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b4e75ee4ff8356eb25ad9f5055cf527e7d2654c005dde8fb5481db50e5164e0f
+size 5732987552
diff --git a/Meta-Llama-3.1-8B-Instruct-Q5_K_S.gguf b/Meta-Llama-3.1-8B-Instruct-Q5_K_S.gguf
new file mode 100644
index 0000000..998a13d
--- /dev/null
+++ b/Meta-Llama-3.1-8B-Instruct-Q5_K_S.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:61a6be00c21161cf45c1f0172a0fdbf4a24f1cb58c84593c66c2db74527de2b3
+size 5599294112
diff --git a/Meta-Llama-3.1-8B-Instruct-Q6_K.gguf b/Meta-Llama-3.1-8B-Instruct-Q6_K.gguf
new file mode 100644
index 0000000..327cfa8
--- /dev/null
+++ b/Meta-Llama-3.1-8B-Instruct-Q6_K.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0b49f782d79e5d33b203c5fca60140586ae3abeec80b894bf8db40f594786ece
+size 6596006560
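The `.gguf` entries above are Git LFS pointer files: three lines giving the pointer spec version, the SHA-256 of the actual blob, and its size in bytes. After fetching the real weights (for example with `git lfs pull`), a download can be checked against these values. A minimal sketch in bash, assuming GNU coreutils; the oid and size are copied from the Q2_K pointer above, and `stat -c%s` is the GNU form (BSD/macOS uses `stat -f%z`):

```bash
# Check a downloaded GGUF blob against its LFS pointer values.
# oid and size are copied from the Q2_K pointer in this patch.
expected_oid="978c23cad76854c8bf944a819159697215575a72a8d70910b41e9effbd7badfb"
expected_size=3179131552
file="Meta-Llama-3.1-8B-Instruct-Q2_K.gguf"

actual_oid=$(sha256sum "$file" | cut -d' ' -f1)
actual_size=$(stat -c%s "$file")   # GNU stat; use `stat -f%z` on BSD/macOS

if [ "$actual_oid" = "$expected_oid" ] && [ "$actual_size" = "$expected_size" ]; then
  echo "OK: $file matches its LFS pointer"
else
  echo "MISMATCH: $file" >&2
fi
```

The same check applies to any of the quants below by swapping in the matching oid/size pair.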
diff --git a/Meta-Llama-3.1-8B-Instruct-Q8_0.gguf b/Meta-Llama-3.1-8B-Instruct-Q8_0.gguf
new file mode 100644
index 0000000..63e9446
--- /dev/null
+++ b/Meta-Llama-3.1-8B-Instruct-Q8_0.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:91818e082e2d87acf79032cb0c9d8e4e4878b1ee38ba68f09e120d47b3aeb04c
+size 8540770976
diff --git a/Meta-Llama-3.1-8B-Instruct-f16.gguf b/Meta-Llama-3.1-8B-Instruct-f16.gguf
new file mode 100644
index 0000000..38ec6cd
--- /dev/null
+++ b/Meta-Llama-3.1-8B-Instruct-f16.gguf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:82ff778433e6590189788b54c00aba834d6eb167808ec32ae659964392a07230
+size 16068891296
diff --git a/README.md b/README.md
index 08c3198..4f4239b 100644
--- a/README.md
+++ b/README.md
@@ -1,47 +1,115 @@
 ---
-license: Apache License 2.0
-
-#model-type:
-## e.g. gpt, phi, llama, chatglm, baichuan, etc.
-#- gpt
-
-#domain:
-## e.g. nlp, cv, audio, multi-modal
-#- nlp
-
-#language:
-## list of language codes: https://help.aliyun.com/document_detail/215387.html?spm=a2c4g.11186623.0.0.9f8d7467kni6Aa
-#- cn
-
-#metrics:
-## e.g. CIDEr, BLEU, ROUGE, etc.
-#- CIDEr
-
-#tags:
-## custom tags, including training methods such as pretrained, fine-tuned, instruction-tuned, RL-tuned, and others
-#- pretrained
-
-#tools:
-## e.g. vllm, fastchat, llamacpp, AdaSeq, etc.
-#- vllm
+license: llama3.1
+model_name: Meta-Llama-3.1-8B-Instruct-GGUF
+arxiv: 2307.09288
+base_model: meta-llama/Meta-Llama-3.1-8B-Instruct
+inference: false
+model_creator: Meta Llama3
+model_type: llama
+pipeline_tag: text-generation
+quantized_by: Second State Inc.
+language:
+- en
+- de
+- fr
+- it
+- pt
+- hi
+- es
+- th
+tags:
+- facebook
+- meta
+- pytorch
+- llama
+- llama-3
 ---
-### The contributors of this model have not provided a more detailed model description. The model files and weights are available on the "Model files" page.
-#### You can download the model with the git clone command below, or through the ModelScope SDK
-SDK download
-```bash
-# Install ModelScope
-pip install modelscope
-```
-```python
-# Download the model via the SDK
-from modelscope import snapshot_download
-model_dir = snapshot_download('second-state/Meta-Llama-3.1-8B-Instruct-GGUF')
-```
-Git download
-```
-# Download the model via git
-git clone https://www.modelscope.cn/second-state/Meta-Llama-3.1-8B-Instruct-GGUF.git
-```
-If you are a contributor to this model, we invite you to complete the model card promptly in accordance with the model contribution documentation.
\ No newline at end of file
+
+# Meta-Llama-3.1-8B-Instruct-GGUF
+
+## Original Model
+
+[meta-llama/Meta-Llama-3.1-8B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3.1-8B-Instruct)
+
+## Run with LlamaEdge
+
+- LlamaEdge version: [v0.12.4](https://github.com/LlamaEdge/LlamaEdge/releases/tag/0.12.4) and above
+
+- Prompt template
+
+  - Prompt type: `llama-3-chat`
+
+  - Prompt string
+
+    ```text
+    <|begin_of_text|><|start_header_id|>system<|end_header_id|>
+
+    {{ system_prompt }}<|eot_id|><|start_header_id|>user<|end_header_id|>
+
+    {{ user_message_1 }}<|eot_id|><|start_header_id|>assistant<|end_header_id|>
+
+    {{ model_answer_1 }}<|eot_id|><|start_header_id|>user<|end_header_id|>
+
+    {{ user_message_2 }}<|eot_id|><|start_header_id|>assistant<|end_header_id|>
+    ```
+
+- Context size: `128000`
+
+- Run as LlamaEdge service
+
+  - Chat
+
+    ```bash
+    wasmedge --dir .:. --nn-preload default:GGML:AUTO:Meta-Llama-3.1-8B-Instruct-Q5_K_M.gguf \
+      llama-api-server.wasm \
+      --prompt-template llama-3-chat \
+      --ctx-size 128000 \
+      --model-name Llama-3.1-8b
+    ```
+
+  - Tool use
+
+    ```bash
+    wasmedge --dir .:. --nn-preload default:GGML:AUTO:Meta-Llama-3.1-8B-Instruct-Q5_K_M.gguf \
+      llama-api-server.wasm \
+      --prompt-template llama-3-tool \
+      --ctx-size 128000 \
+      --model-name Llama-3.1-8b
+    ```
+
+- Run as LlamaEdge command app
+
+  ```bash
+  wasmedge --dir .:. --nn-preload default:GGML:AUTO:Meta-Llama-3.1-8B-Instruct-Q5_K_M.gguf \
+    llama-chat.wasm \
+    --prompt-template llama-3-chat \
+    --ctx-size 128000
+  ```
+
+## Quantized GGUF Models
+
+| Name | Quant method | Bits | Size | Use case |
+| ---- | ---- | ---- | ---- | ----- |
+| [Meta-Llama-3.1-8B-Instruct-Q2_K.gguf](https://huggingface.co/second-state/Meta-Llama-3.1-8B-Instruct-GGUF/blob/main/Meta-Llama-3.1-8B-Instruct-Q2_K.gguf) | Q2_K | 2 | 3.18 GB | smallest, significant quality loss - not recommended for most purposes |
+| [Meta-Llama-3.1-8B-Instruct-Q3_K_L.gguf](https://huggingface.co/second-state/Meta-Llama-3.1-8B-Instruct-GGUF/blob/main/Meta-Llama-3.1-8B-Instruct-Q3_K_L.gguf) | Q3_K_L | 3 | 4.32 GB | small, substantial quality loss |
+| [Meta-Llama-3.1-8B-Instruct-Q3_K_M.gguf](https://huggingface.co/second-state/Meta-Llama-3.1-8B-Instruct-GGUF/blob/main/Meta-Llama-3.1-8B-Instruct-Q3_K_M.gguf) | Q3_K_M | 3 | 4.02 GB | very small, high quality loss |
+| [Meta-Llama-3.1-8B-Instruct-Q3_K_S.gguf](https://huggingface.co/second-state/Meta-Llama-3.1-8B-Instruct-GGUF/blob/main/Meta-Llama-3.1-8B-Instruct-Q3_K_S.gguf) | Q3_K_S | 3 | 3.66 GB | very small, high quality loss |
+| [Meta-Llama-3.1-8B-Instruct-Q4_0.gguf](https://huggingface.co/second-state/Meta-Llama-3.1-8B-Instruct-GGUF/blob/main/Meta-Llama-3.1-8B-Instruct-Q4_0.gguf) | Q4_0 | 4 | 4.66 GB | legacy; small, very high quality loss - prefer using Q3_K_M |
+| [Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf](https://huggingface.co/second-state/Meta-Llama-3.1-8B-Instruct-GGUF/blob/main/Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf) | Q4_K_M | 4 | 4.92 GB | medium, balanced quality - recommended |
+| [Meta-Llama-3.1-8B-Instruct-Q4_K_S.gguf](https://huggingface.co/second-state/Meta-Llama-3.1-8B-Instruct-GGUF/blob/main/Meta-Llama-3.1-8B-Instruct-Q4_K_S.gguf) | Q4_K_S | 4 | 4.69 GB | small, greater quality loss |
+| [Meta-Llama-3.1-8B-Instruct-Q5_0.gguf](https://huggingface.co/second-state/Meta-Llama-3.1-8B-Instruct-GGUF/blob/main/Meta-Llama-3.1-8B-Instruct-Q5_0.gguf) | Q5_0 | 5 | 5.6 GB | legacy; medium, balanced quality - prefer using Q4_K_M |
+| [Meta-Llama-3.1-8B-Instruct-Q5_K_M.gguf](https://huggingface.co/second-state/Meta-Llama-3.1-8B-Instruct-GGUF/blob/main/Meta-Llama-3.1-8B-Instruct-Q5_K_M.gguf) | Q5_K_M | 5 | 5.73 GB | large, very low quality loss - recommended |
+| [Meta-Llama-3.1-8B-Instruct-Q5_K_S.gguf](https://huggingface.co/second-state/Meta-Llama-3.1-8B-Instruct-GGUF/blob/main/Meta-Llama-3.1-8B-Instruct-Q5_K_S.gguf) | Q5_K_S | 5 | 5.6 GB | large, low quality loss - recommended |
+| [Meta-Llama-3.1-8B-Instruct-Q6_K.gguf](https://huggingface.co/second-state/Meta-Llama-3.1-8B-Instruct-GGUF/blob/main/Meta-Llama-3.1-8B-Instruct-Q6_K.gguf) | Q6_K | 6 | 6.6 GB | very large, extremely low quality loss |
+| [Meta-Llama-3.1-8B-Instruct-Q8_0.gguf](https://huggingface.co/second-state/Meta-Llama-3.1-8B-Instruct-GGUF/blob/main/Meta-Llama-3.1-8B-Instruct-Q8_0.gguf) | Q8_0 | 8 | 8.54 GB | very large, extremely low quality loss - not recommended |
+| [Meta-Llama-3.1-8B-Instruct-f16.gguf](https://huggingface.co/second-state/Meta-Llama-3.1-8B-Instruct-GGUF/blob/main/Meta-Llama-3.1-8B-Instruct-f16.gguf) | f16 | 16 | 16.1 GB | full 16-bit precision; largest, essentially lossless |
+
+*Quantized with llama.cpp b3445.*
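For context on the service commands in the README above: `llama-api-server.wasm` exposes an OpenAI-compatible HTTP API. A usage sketch, assuming the server's default port `8080` and the `/v1/chat/completions` route (both may differ across LlamaEdge versions):

```bash
# One chat-completion request against the running LlamaEdge server.
# "model" must match the --model-name passed to llama-api-server.wasm.
curl -s http://localhost:8080/v1/chat/completions \
  -H 'Content-Type: application/json' \
  -d '{
    "model": "Llama-3.1-8b",
    "messages": [
      {"role": "system", "content": "You are a helpful assistant."},
      {"role": "user", "content": "What is GGUF, in one sentence?"}
    ]
  }'
```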
diff --git a/config.json b/config.json
new file mode 100644
index 0000000..0bb6fd7
--- /dev/null
+++ b/config.json
@@ -0,0 +1,38 @@
+{
+  "architectures": [
+    "LlamaForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "bos_token_id": 128000,
+  "eos_token_id": [
+    128001,
+    128008,
+    128009
+  ],
+  "hidden_act": "silu",
+  "hidden_size": 4096,
+  "initializer_range": 0.02,
+  "intermediate_size": 14336,
+  "max_position_embeddings": 131072,
+  "mlp_bias": false,
+  "model_type": "llama",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 32,
+  "num_key_value_heads": 8,
+  "pretraining_tp": 1,
+  "rms_norm_eps": 1e-05,
+  "rope_scaling": {
+    "factor": 8.0,
+    "low_freq_factor": 1.0,
+    "high_freq_factor": 4.0,
+    "original_max_position_embeddings": 8192,
+    "rope_type": "llama3"
+  },
+  "rope_theta": 500000.0,
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.42.3",
+  "use_cache": true,
+  "vocab_size": 128256
+}
diff --git a/configuration.json b/configuration.json
new file mode 100644
index 0000000..bbeeda1
--- /dev/null
+++ b/configuration.json
@@ -0,0 +1 @@
+{"framework": "pytorch", "task": "text-generation", "allow_remote": true}
\ No newline at end of file
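The `config.json` added above mirrors the upstream `transformers` configuration (grouped-query attention with 8 KV heads, 131072 max positions via `llama3`-type RoPE scaling). The GGUF files carry the same hyperparameters in their own headers, which can be cross-checked. A sketch assuming the `gguf` Python package from the llama.cpp project, which installs a `gguf-dump` console script:

```bash
# Dump GGUF header metadata and compare it with config.json.
pip install gguf
gguf-dump Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf | head -n 40
# Keys such as llama.context_length, llama.embedding_length,
# llama.attention.head_count and llama.attention.head_count_kv
# should line up with max_position_embeddings, hidden_size, and
# the attention/KV head counts in config.json above.
```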