Initialize project; model provided by the ModelHub XC community
Model: solidrust/Hermes-3-Llama-3.1-8B-AWQ Source: Original Platform
35 .gitattributes vendored Normal file
@@ -0,0 +1,35 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
80 README.md Normal file
@@ -0,0 +1,80 @@
---
base_model: NousResearch/Hermes-3-Llama-3.1-8B
inference: false
library_name: transformers
pipeline_tag: text-generation
quantized_by: Suparious
tags:
- 4-bit
- AWQ
- text-generation
- autotrain_compatible
- endpoints_compatible
---
# NousResearch/Hermes-3-Llama-3.1-8B AWQ

- Model creator: [NousResearch](https://huggingface.co/NousResearch)
- Original model: [Hermes-3-Llama-3.1-8B](https://huggingface.co/NousResearch/Hermes-3-Llama-3.1-8B)

## How to use

### Install the necessary packages

```bash
pip install --upgrade autoawq autoawq-kernels
```

### Example Python code

```python
from awq import AutoAWQForCausalLM
from transformers import AutoTokenizer, TextStreamer

model_path = "solidrust/Hermes-3-Llama-3.1-8B-AWQ"
system_message = "You are Hermes-3-Llama-3.1-8B, incarnated as a powerful AI. You were created by NousResearch."

# Load model
model = AutoAWQForCausalLM.from_quantized(model_path,
                                          fuse_layers=True)
tokenizer = AutoTokenizer.from_pretrained(model_path,
                                          trust_remote_code=True)
streamer = TextStreamer(tokenizer,
                        skip_prompt=True,
                        skip_special_tokens=True)

# Convert prompt to tokens
prompt_template = """\
<|im_start|>system
{system_message}<|im_end|>
<|im_start|>user
{prompt}<|im_end|>
<|im_start|>assistant"""

prompt = "You're standing on the surface of the Earth. "\
    "You walk one mile south, one mile west and one mile north. "\
    "You end up exactly where you started. Where are you?"

tokens = tokenizer(prompt_template.format(system_message=system_message, prompt=prompt),
                   return_tensors='pt').input_ids.cuda()

# Generate output
generation_output = model.generate(tokens,
                                   streamer=streamer,
                                   max_new_tokens=512)
```
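
The prompt template above is plain ChatML. As an alternative sketch (not from the original card), a tokenizer that ships a chat template can build the same prompt with `apply_chat_template`; this assumes the repository's tokenizer_config.json defines one:

```python
# Hypothetical alternative: build the ChatML prompt from the tokenizer's chat template.
# Assumes tokenizer_config.json in this repo defines a chat_template.
messages = [
    {"role": "system", "content": system_message},
    {"role": "user", "content": prompt},
]
tokens = tokenizer.apply_chat_template(messages,
                                       add_generation_prompt=True,
                                       return_tensors="pt").cuda()
```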

### About AWQ

AWQ is an efficient, accurate and blazing-fast low-bit weight quantization method, currently supporting 4-bit quantization. Compared to GPTQ, it offers faster Transformers-based inference with equivalent or better quality than the most commonly used GPTQ settings.

AWQ models are currently supported on Linux and Windows, with NVidia GPUs only. macOS users: please use GGUF models instead.

It is supported by:

- [Text Generation Webui](https://github.com/oobabooga/text-generation-webui) - using Loader: AutoAWQ
- [vLLM](https://github.com/vllm-project/vllm) - version 0.2.2 or later, with support for all model types (a minimal sketch follows below)
- [Hugging Face Text Generation Inference (TGI)](https://github.com/huggingface/text-generation-inference)
- [Transformers](https://huggingface.co/docs/transformers) version 4.35.0 and later, from any code or client that supports Transformers
- [AutoAWQ](https://github.com/casper-hansen/AutoAWQ) - for use from Python code
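
As an illustration of the vLLM route, here is a minimal sketch (not part of the original card; it assumes vllm is installed and a CUDA GPU is available). The sampling values mirror the defaults in this repo's generation_config.json:

```python
# Hypothetical vLLM usage sketch for this AWQ checkpoint.
from vllm import LLM, SamplingParams

llm = LLM(model="solidrust/Hermes-3-Llama-3.1-8B-AWQ", quantization="awq")
params = SamplingParams(temperature=0.6, top_p=0.9, max_tokens=256)

# The model expects ChatML-formatted prompts, as in the example above.
chat_prompt = ("<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n"
               "<|im_start|>user\nWhat is AWQ quantization?<|im_end|>\n"
               "<|im_start|>assistant\n")

for output in llm.generate([chat_prompt], params):
    print(output.outputs[0].text)
```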
43 config.json Normal file
@@ -0,0 +1,43 @@
{
  "_name_or_path": "/opt/openbet/inference/data/NousResearch-Hermes-3-Llama-3.1-8B",
  "architectures": [
    "LlamaForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "bos_token_id": 128000,
  "eos_token_id": 128040,
  "hidden_act": "silu",
  "hidden_size": 4096,
  "initializer_range": 0.02,
  "intermediate_size": 14336,
  "max_position_embeddings": 131072,
  "mlp_bias": false,
  "model_type": "llama",
  "num_attention_heads": 32,
  "num_hidden_layers": 32,
  "num_key_value_heads": 8,
  "pretraining_tp": 1,
  "quantization_config": {
    "bits": 4,
    "group_size": 128,
    "modules_to_not_convert": null,
    "quant_method": "awq",
    "version": "gemm",
    "zero_point": true
  },
  "rms_norm_eps": 1e-05,
  "rope_scaling": {
    "factor": 8.0,
    "high_freq_factor": 4.0,
    "low_freq_factor": 1.0,
    "original_max_position_embeddings": 8192,
    "rope_type": "llama3"
  },
  "rope_theta": 500000.0,
  "tie_word_embeddings": false,
  "torch_dtype": "float16",
  "transformers_version": "4.44.2",
  "use_cache": true,
  "vocab_size": 128256
}
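
The quantization_config block above is what marks the checkpoint as 4-bit AWQ (GEMM kernels, group size 128, zero point enabled). As a hedged sketch (not part of this commit), Transformers 4.35+ can load such a checkpoint directly from this metadata when AutoAWQ is installed:

```python
# Hypothetical loading sketch: Transformers reads quantization_config and
# dispatches to AWQ kernels (requires the autoawq and accelerate packages, CUDA GPU).
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained("solidrust/Hermes-3-Llama-3.1-8B-AWQ",
                                             device_map="auto")
tokenizer = AutoTokenizer.from_pretrained("solidrust/Hermes-3-Llama-3.1-8B-AWQ")
```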
9 generation_config.json Normal file
@@ -0,0 +1,9 @@
{
  "_from_model_config": true,
  "bos_token_id": 128000,
  "do_sample": true,
  "eos_token_id": 128040,
  "temperature": 0.6,
  "top_p": 0.9,
  "transformers_version": "4.44.2"
}
3 model-00001-of-00002.safetensors Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fc5de4a8d9f33db969b24553c99d46224268dafc857b0b8f1f91277c619b6199
size 4677265296
3 model-00002-of-00002.safetensors Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0a2a4b281f236cf8279b65c2137146231b0a66e8c766af21980bb2b337200ff9
size 1050673280
746 model.safetensors.index.json Normal file
@@ -0,0 +1,746 @@
{
  "metadata": {
    "total_size": 5727854592
  },
  "weight_map": {
    "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
    "model.layers.0.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors",
    "model.layers.0.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors",
    "model.layers.0.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
    "model.layers.0.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors",
    "model.layers.0.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors",
    "model.layers.0.self_attn.k_proj.scales": "model-00001-of-00002.safetensors",
    "model.layers.0.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors",
    "model.layers.0.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors",
    "model.layers.0.self_attn.v_proj.scales": "model-00001-of-00002.safetensors",
    "model.layers.0.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors",
    "model.layers.0.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors",
    "model.layers.0.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
    "model.layers.0.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors",
    "model.layers.0.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors",
    "model.layers.0.mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
    "model.layers.0.mlp.up_proj.qweight": "model-00001-of-00002.safetensors",
    "model.layers.0.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors",
    "model.layers.0.mlp.up_proj.scales": "model-00001-of-00002.safetensors",
    "model.layers.0.mlp.down_proj.qweight": "model-00001-of-00002.safetensors",
    "model.layers.0.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors",
    "model.layers.0.mlp.down_proj.scales": "model-00001-of-00002.safetensors",
    "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"model.layers.1.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.1.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.1.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.1.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.1.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.1.self_attn.k_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.1.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.1.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.1.self_attn.v_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.1.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.1.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.1.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.1.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.1.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.1.mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.1.mlp.up_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.1.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.1.mlp.up_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.1.mlp.down_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.1.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.1.mlp.down_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.2.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.2.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.2.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.2.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.2.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.2.self_attn.k_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.2.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.2.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.2.self_attn.v_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.2.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.2.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.2.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.2.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.2.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.2.mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.2.mlp.up_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.2.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.2.mlp.up_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.2.mlp.down_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.2.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.2.mlp.down_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.3.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.3.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.3.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.3.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.3.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.3.self_attn.k_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.3.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.3.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.3.self_attn.v_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.3.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.3.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.3.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.3.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.3.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.3.mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.3.mlp.up_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.3.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.3.mlp.up_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.3.mlp.down_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.3.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.3.mlp.down_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.4.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.4.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.4.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.4.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.4.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.4.self_attn.k_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.4.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.4.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.4.self_attn.v_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.4.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.4.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.4.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.4.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.4.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.4.mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.4.mlp.up_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.4.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.4.mlp.up_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.4.mlp.down_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.4.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.4.mlp.down_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.5.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.5.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.5.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.5.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.5.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.5.self_attn.k_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.5.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.5.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.5.self_attn.v_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.5.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.5.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.5.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.5.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.5.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.5.mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.5.mlp.up_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.5.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.5.mlp.up_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.5.mlp.down_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.5.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.5.mlp.down_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.6.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.6.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.6.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.6.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.6.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.6.self_attn.k_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.6.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.6.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.6.self_attn.v_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.6.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.6.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.6.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.6.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.6.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.6.mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.6.mlp.up_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.6.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.6.mlp.up_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.6.mlp.down_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.6.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.6.mlp.down_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.7.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.7.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.7.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.7.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.7.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.7.self_attn.k_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.7.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.7.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.7.self_attn.v_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.7.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.7.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.7.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.7.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.7.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.7.mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.7.mlp.up_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.7.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.7.mlp.up_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.7.mlp.down_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.7.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.7.mlp.down_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.8.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.8.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.8.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.8.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.8.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.8.self_attn.k_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.8.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.8.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.8.self_attn.v_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.8.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.8.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.8.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.8.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.8.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.8.mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.8.mlp.up_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.8.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.8.mlp.up_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.8.mlp.down_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.8.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.8.mlp.down_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.9.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.9.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.9.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.9.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.9.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.9.self_attn.k_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.9.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.9.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.9.self_attn.v_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.9.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.9.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.9.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.9.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.9.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.9.mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.9.mlp.up_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.9.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.9.mlp.up_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.9.mlp.down_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.9.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.9.mlp.down_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.10.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.10.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.10.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.10.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.10.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.10.self_attn.k_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.10.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.10.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.10.self_attn.v_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.10.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.10.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.10.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.10.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.10.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.10.mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.10.mlp.up_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.10.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.10.mlp.up_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.10.mlp.down_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.10.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.10.mlp.down_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.11.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.11.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.11.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.11.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.11.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.11.self_attn.k_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.11.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.11.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.11.self_attn.v_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.11.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.11.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.11.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.11.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.11.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.11.mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.11.mlp.up_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.11.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.11.mlp.up_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.11.mlp.down_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.11.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.11.mlp.down_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.12.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.12.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.12.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.12.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.12.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.12.self_attn.k_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.12.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.12.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.12.self_attn.v_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.12.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.12.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.12.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.12.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.12.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.12.mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.12.mlp.up_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.12.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.12.mlp.up_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.12.mlp.down_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.12.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.12.mlp.down_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.13.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.13.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.13.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.13.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.13.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.13.self_attn.k_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.13.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.13.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.13.self_attn.v_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.13.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.13.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.13.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.13.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.13.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.13.mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.13.mlp.up_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.13.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.13.mlp.up_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.13.mlp.down_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.13.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.13.mlp.down_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.14.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.14.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.14.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.14.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.14.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.14.self_attn.k_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.14.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.14.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.14.self_attn.v_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.14.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.14.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.14.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.14.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.14.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.14.mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.14.mlp.up_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.14.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.14.mlp.up_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.14.mlp.down_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.14.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.14.mlp.down_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.15.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.15.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.15.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.15.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.15.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.15.self_attn.k_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.15.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.15.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.15.self_attn.v_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.15.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.15.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.15.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.15.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.15.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.15.mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.15.mlp.up_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.15.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.15.mlp.up_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.15.mlp.down_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.15.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.15.mlp.down_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.16.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.16.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.16.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.16.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.16.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.16.self_attn.k_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.16.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.16.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.16.self_attn.v_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.16.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.16.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.16.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.16.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.16.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.16.mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.16.mlp.up_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.16.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.16.mlp.up_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.16.mlp.down_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.16.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.16.mlp.down_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.17.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.17.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.17.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.17.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.17.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.17.self_attn.k_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.17.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.17.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.17.self_attn.v_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.17.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.17.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.17.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.17.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.17.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.17.mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.17.mlp.up_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.17.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.17.mlp.up_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.17.mlp.down_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.17.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.17.mlp.down_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.18.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.18.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.18.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.18.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.18.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.18.self_attn.k_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.18.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.18.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.18.self_attn.v_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.18.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.18.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.18.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.18.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.18.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.18.mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.18.mlp.up_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.18.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.18.mlp.up_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.18.mlp.down_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.18.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.18.mlp.down_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.19.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.19.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.19.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.19.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.19.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.19.self_attn.k_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.19.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.19.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.19.self_attn.v_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.19.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.19.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.19.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.19.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.19.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.19.mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.19.mlp.up_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.19.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.19.mlp.up_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.19.mlp.down_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.19.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.19.mlp.down_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.20.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.20.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.20.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.20.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.20.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.20.self_attn.k_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.20.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.20.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.20.self_attn.v_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.20.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.20.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.20.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.20.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.20.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.20.mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.20.mlp.up_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.20.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.20.mlp.up_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.20.mlp.down_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.20.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.20.mlp.down_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.21.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.21.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.21.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.21.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.21.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.21.self_attn.k_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.21.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.21.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.21.self_attn.v_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.21.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.21.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.21.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.21.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.21.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.21.mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.21.mlp.up_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.21.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.21.mlp.up_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.21.mlp.down_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.21.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.21.mlp.down_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.21.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.21.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.22.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.22.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.22.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.22.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.22.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.22.self_attn.k_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.22.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.22.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.22.self_attn.v_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.22.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.22.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.22.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.22.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.22.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.22.mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.22.mlp.up_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.22.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.22.mlp.up_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.22.mlp.down_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.22.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.22.mlp.down_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.22.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.22.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.23.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.23.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.23.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.23.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.23.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.23.self_attn.k_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.23.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.23.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.23.self_attn.v_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.23.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.23.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.23.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.23.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.23.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.23.mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.23.mlp.up_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.23.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.23.mlp.up_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.23.mlp.down_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.23.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.23.mlp.down_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.23.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.23.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.24.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.24.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.24.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.24.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.24.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.24.self_attn.k_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.24.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.24.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.24.self_attn.v_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.24.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.24.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.24.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.24.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.24.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.24.mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.24.mlp.up_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.24.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.24.mlp.up_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.24.mlp.down_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.24.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.24.mlp.down_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.24.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.24.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.25.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.25.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.25.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.25.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.25.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.25.self_attn.k_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.25.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.25.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.25.self_attn.v_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.25.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.25.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.25.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.25.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.25.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.25.mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.25.mlp.up_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.25.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.25.mlp.up_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.25.mlp.down_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.25.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.25.mlp.down_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.25.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.25.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.26.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.26.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.26.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.26.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.26.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.26.self_attn.k_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.26.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.26.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.26.self_attn.v_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.26.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.26.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.26.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.26.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.26.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.26.mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.26.mlp.up_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.26.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.26.mlp.up_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.26.mlp.down_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.26.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.26.mlp.down_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.26.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.26.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.27.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.27.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.27.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.27.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.27.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.27.self_attn.k_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.27.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.27.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.27.self_attn.v_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.27.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.27.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.27.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.27.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.27.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.27.mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.27.mlp.up_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.27.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.27.mlp.up_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.27.mlp.down_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.27.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.27.mlp.down_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.27.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.27.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.28.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.28.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.28.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.28.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.28.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.28.self_attn.k_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.28.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.28.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.28.self_attn.v_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.28.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.28.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.28.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.28.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.28.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.28.mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.28.mlp.up_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.28.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.28.mlp.up_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.28.mlp.down_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.28.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.28.mlp.down_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.28.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.28.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.29.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.29.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.29.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.29.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.29.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.29.self_attn.k_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.29.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.29.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.29.self_attn.v_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.29.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.29.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.29.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.29.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.29.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.29.mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.29.mlp.up_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.29.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.29.mlp.up_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.29.mlp.down_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.29.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.29.mlp.down_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.29.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.29.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.30.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.30.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.30.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.30.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.30.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.30.self_attn.k_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.30.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.30.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.30.self_attn.v_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.30.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.30.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.30.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.30.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.30.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.30.mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.30.mlp.up_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.30.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.30.mlp.up_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.30.mlp.down_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.30.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.30.mlp.down_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.30.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.30.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.31.self_attn.q_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.31.self_attn.q_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.31.self_attn.q_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.31.self_attn.k_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.31.self_attn.k_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.31.self_attn.k_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.31.self_attn.v_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.31.self_attn.v_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.31.self_attn.v_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.31.self_attn.o_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.31.self_attn.o_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.31.self_attn.o_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.31.mlp.gate_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.31.mlp.gate_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.31.mlp.gate_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.31.mlp.up_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.31.mlp.up_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.31.mlp.up_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.31.mlp.down_proj.qweight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.31.mlp.down_proj.qzeros": "model-00001-of-00002.safetensors",
|
||||
"model.layers.31.mlp.down_proj.scales": "model-00001-of-00002.safetensors",
|
||||
"model.layers.31.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.layers.31.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
||||
"model.norm.weight": "model-00001-of-00002.safetensors",
|
||||
"lm_head.weight": "model-00002-of-00002.safetensors"
|
||||
}
|
||||
}
|
||||
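For reference, the `weight_map` above is the standard sharded-safetensors index: every tensor name points at the shard file that stores it (here the quantized AWQ tensors `qweight`/`qzeros`/`scales` and the layernorms live in shard 1, with `lm_head.weight` in shard 2). Below is a minimal sketch, not part of the committed files, of how such an index can be resolved by hand from a local clone; file names come from the index above, everything else is illustrative.

```python
import json
from safetensors import safe_open  # pip install safetensors

# Read the index and group tensor names by the shard that holds them.
with open("model.safetensors.index.json") as f:
    weight_map = json.load(f)["weight_map"]

shards = {}
for name, shard_file in weight_map.items():
    shards.setdefault(shard_file, []).append(name)

# Open each shard once and pull out its tensors.
state_dict = {}
for shard_file, names in shards.items():
    with safe_open(shard_file, framework="pt") as shard:
        for name in names:
            state_dict[name] = shard.get_tensor(name)

# AWQ stores packed integer qweight/qzeros alongside floating-point scales.
print(state_dict["model.layers.23.self_attn.o_proj.qweight"].shape)
```

In practice `AutoAWQForCausalLM.from_quantized` handles this resolution automatically; the sketch only shows what the index file encodes.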
23
special_tokens_map.json
Normal file
23
special_tokens_map.json
Normal file
@@ -0,0 +1,23 @@
{
  "bos_token": {
    "content": "<|begin_of_text|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "<|im_end|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|im_end|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
410563
tokenizer.json
Normal file
410563
tokenizer.json
Normal file
File diff suppressed because it is too large
2072
tokenizer_config.json
Normal file
2072
tokenizer_config.json
Normal file
File diff suppressed because one or more lines are too long