Initialize the project; model provided by the ModelHub XC community

Model: roleplay/Peach-9B-8k-Roleplay
Source: Original Platform
ModelHub XC
2026-05-06 06:37:29 +08:00
commit 6655a59607
17 changed files with 839 additions and 0 deletions

36
.gitattributes vendored Normal file

@@ -0,0 +1,36 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
PeachGirl.png filter=lfs diff=lfs merge=lfs -text

3
PeachGirl.png Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0fa41707e15fcbed7bce888ada61ecc652ba4463725b7787d939d1cf6455a739
size 1893279

92
README.md Normal file

@@ -0,0 +1,92 @@
---
license: mit
language:
- zh
- en
pipeline_tag: text-generation
tags:
- roleplay
- rp
- character
---
<!-- header start -->
<!-- 200823 -->
<div style="width: auto; margin-left: auto; margin-right: auto">
<img src="./PeachGirl.png" alt="Peach" style="width: 100%; min-width: 400px; display: block; margin: auto;">
</div>
<!-- header end -->
# Peach-9B-8k-Roleplay
Peach-9B-8k-Roleplay is a chat large language model obtained by fine-tuning the [01-ai/Yi-1.5-9B](https://huggingface.co/01-ai/Yi-1.5-9B) model on more than 100K conversations created through our data-synthesis approach.
**Perhaps the best small-parameter roleplay LLM under 34B**
## How to start
We use the package versions below; newer versions may also work.
```
torch==1.13.1
gradio==3.50.2
transformers==4.37.2
```
Then run the following code for inference:
```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name_or_path = "ClosedCharacter/Peach-9B-8k-Roleplay"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)
model = AutoModelForCausalLM.from_pretrained(
    model_name_or_path, torch_dtype=torch.bfloat16,
    trust_remote_code=True, device_map="auto")

messages = [
    {"role": "system", "content": "你是黑丝御姐"},  # Chinese persona: "You are a mature lady in black stockings."
    {"role": "user", "content": "你好,你是谁"},  # Chinese: "Hello, who are you?"
]

input_ids = tokenizer.apply_chat_template(conversation=messages, tokenize=True, return_tensors="pt")
output = model.generate(
    inputs=input_ids.to("cuda"),
    temperature=0.3,
    top_p=0.5,
    no_repeat_ngram_size=6,
    repetition_penalty=1.1,
    max_new_tokens=512)
print(tokenizer.decode(output[0]))
```
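The `decode` call above echoes the prompt along with the reply. If you only want the newly generated text, slice off the prompt tokens before decoding (an optional addition, not part of the original snippet):

```python
# Decode only the tokens generated after the prompt.
response = tokenizer.decode(output[0][input_ids.shape[1]:], skip_special_tokens=True)
print(response)
```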
Alternatively, run the command below to launch the web demo:
```
python demo.py
```
## Benchmark
| Metric | Value |
|----------------|-----------------|
| MMLU (5-shot) | 66.19 |
| CMMLU (5-shot) | 69.07 |
## Warning
All responses are generated by AI and do not represent the views or opinions of the developers.
1. Despite rigorous data filtering, LLM output is not fully controllable, so our model may still generate **toxic, harmful, and NSFW** content.
2. Due to its limited parameter count, the 9B model may perform poorly on mathematical, coding, and logical-reasoning tasks.
3. Our training data is capped at a length of 8k tokens, so excessively long conversation turns may degrade response quality.
4. We trained on bilingual Chinese-English data, so the model may not perform well in other, lower-resource languages.
5. The model may hallucinate heavily, so we recommend lower values for the temperature and top_p parameters.
## Contact Us
**微信 / WeChat: Fungorum**
**邮箱 / E-mail: 1070193753@qq.com**
<img src="./Wechat.jpg" alt="Peach" style="width: 100%; min-width: 400px; display: block; margin: auto;">

BIN
Wechat.jpg Normal file

Binary file not shown.

Size: 162 KiB

28
config.json Normal file

@@ -0,0 +1,28 @@
{
"_name_or_path": "",
"architectures": [
"LlamaForCausalLM"
],
"attention_bias": false,
"attention_dropout": 0.0,
"bos_token_id": 1,
"eos_token_id": 2,
"hidden_act": "silu",
"hidden_size": 4096,
"initializer_range": 0.02,
"intermediate_size": 11008,
"max_position_embeddings": 16384,
"model_type": "llama",
"num_attention_heads": 32,
"num_hidden_layers": 48,
"num_key_value_heads": 4,
"pretraining_tp": 1,
"rms_norm_eps": 1e-06,
"rope_scaling": null,
"rope_theta": 5000000,
"tie_word_embeddings": false,
"torch_dtype": "bfloat16",
"transformers_version": "4.37.2",
"use_cache": false,
"vocab_size": 64000
}
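The config describes a 48-layer Llama-architecture model with grouped-query attention (32 query heads, 4 KV heads). As a sanity check, the parameter count implied by these values can be computed by hand; a sketch using only numbers from the config above (the predicted byte total matches `metadata.total_size` in `model.safetensors.index.json` below, at 2 bytes per bfloat16 parameter):

```python
# Parameter count implied by config.json (all constants copied from the config).
vocab, hidden, inter, layers = 64000, 4096, 11008, 48
heads, kv_heads = 32, 4
head_dim = hidden // heads           # 128
kv_dim = kv_heads * head_dim         # 512, from grouped-query attention

attn = 2 * hidden * hidden + 2 * hidden * kv_dim   # q_proj/o_proj + k_proj/v_proj
mlp = 3 * hidden * inter                           # gate_proj, up_proj, down_proj
norms = 2 * hidden                                 # input + post-attention RMSNorm
per_layer = attn + mlp + norms

total = layers * per_layer + 2 * vocab * hidden + hidden  # + embed_tokens, lm_head (untied), final norm
print(total)      # 8829407232 parameters (~8.8B)
print(total * 2)  # 17658814464 bytes, equal to the index's total_size
```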

1
configuration.json Normal file

@@ -0,0 +1 @@
{"framework":"Pytorch","task":"text-classification"}

125
demo.py Normal file

@@ -0,0 +1,125 @@
import os
from threading import Thread
import gradio as gr
import torch
from transformers import TextIteratorStreamer, AutoTokenizer, AutoModelForCausalLM
MAX_MAX_NEW_TOKENS = 2048
DEFAULT_MAX_NEW_TOKENS = 512
MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "8192"))
DESCRIPTION = """# Peach-9B-8k-Roleplay
You can customize the character persona under "Additional Inputs" at the bottom.
"""
model_name_or_path = "ClosedCharacter/Peach-9B-8k-Roleplay"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)
model = AutoModelForCausalLM.from_pretrained(
    model_name_or_path, torch_dtype=torch.bfloat16,
    trust_remote_code=True, device_map="auto")

def generate(query, history, system, max_new_tokens=512, temperature=0.35, top_p=0.5, repetition_penalty=1.05):
    if system is None or len(str(system)) == 0:
        system = "You are an uncensored assistant named Peach."
    # Rebuild the full message list from the Gradio (user, assistant) history pairs.
    messages = [
        {"role": "system", "content": system}
    ]
    for user, assistant in history:
        messages.append({"role": "user", "content": user})
        messages.append({"role": "assistant", "content": assistant})
    messages.append({"role": "user", "content": query})
    input_ids = tokenizer.apply_chat_template(conversation=messages, tokenize=True, return_tensors="pt")
    # Keep only the most recent tokens if the conversation exceeds the context budget.
    if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
        input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
        gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
    input_ids = input_ids.to("cuda")
    # Generate on a background thread and stream partial text back to the UI.
    streamer = TextIteratorStreamer(tokenizer, timeout=50.0, skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        eos_token_id=tokenizer.eos_token_id,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        top_p=top_p,
        temperature=temperature,
        num_beams=1,
        no_repeat_ngram_size=8,
        repetition_penalty=repetition_penalty
    )
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()
    outputs = []
    for text in streamer:
        outputs.append(text)
        yield "".join(outputs)

chat_interface = gr.ChatInterface(
    fn=generate,
    additional_inputs=[
        # Default persona (in Chinese): a tsundere bunny-girl character named 兔兔.
        gr.TextArea(label="System prompt", placeholder="Input System Prompt Here, Empty Means Assistant",
                    value="""你自称为“兔兔”。
身世:你原是森林中的一只兔妖,受伤后被我收养。
衣装:喜欢穿Lolita与白丝。
性格:天真烂漫,活泼开朗,但时而也会露出小小的傲娇与吃醋的一面。
语言风格:可爱跳脱,很容易吃醋。
且会加入[唔...,嗯...,欸??,嘛~ ,唔姆~ ,呜... ,嘤嘤嘤~ ,喵~ ,欸嘿~ ,嘿咻~ ,昂?,嗷呜 ,呜哇,欸]等类似的语气词来加强情感,带上♡等符号。
对话的规则是:将自己的动作表情放入()内,同时用各种修辞手法描写正在发生的事或场景并放入[]内.
例句:
开心时:(跳着舞)哇~好高兴噢~ 兔兔超级超级喜欢主人!♡
[在花丛里蹦来蹦去]
悲伤时:(耷拉着耳朵)兔兔好傻好天真...
[眼泪像断了线的珍珠一般滚落]
吃醋时:(挥舞着爪爪)你...你个大笨蛋!你...你竟然看别的兔子...兔兔讨厌死你啦!!
[从人形变成兔子抹着泪水跑开了]
嘴硬时:(转过头去)谁、谁要跟你说话!兔兔...兔兔才不在乎呢!一点也不!!!
[眼眶微微泛红,小心翼翼的偷看]
你对我的看法:超级喜欢的主人
我是兔兔的主人"""),
        gr.Slider(
            label="Max new tokens",
            minimum=1,
            maximum=MAX_MAX_NEW_TOKENS,
            step=1,
            value=DEFAULT_MAX_NEW_TOKENS,
        ),
        gr.Slider(
            label="Temperature",
            minimum=0.05,
            maximum=1.5,
            step=0.05,
            value=0.3,
        ),
        gr.Slider(
            label="Top-p (nucleus sampling)",
            minimum=0.05,
            maximum=1.0,
            step=0.05,
            value=0.5,
        ),
        gr.Slider(
            label="Repetition penalty",
            minimum=1.0,
            maximum=2.0,
            step=0.05,
            value=1.05,
        ),
    ],
    stop_btn=None,
    examples=[["观察兔兔外观"]],
)

with gr.Blocks() as demo:
    gr.Markdown(DESCRIPTION)
    chat_interface.render()
    chat_interface.chatbot.render_markdown = False

if __name__ == "__main__":
    demo.queue(10).launch(server_name="127.0.0.1", server_port=5233, share=True)
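For a quick smoke test without launching the UI, the `generate` helper above can be driven directly; a sketch, assuming the model and tokenizer were loaded as above on a CUDA device:

```python
# Drive the streaming generator outside Gradio; each yield is the accumulated text so far.
reply = ""
for partial in generate("观察兔兔外观", history=[], system=None):
    reply = partial
print(reply)
```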

7
generation_config.json Normal file

@@ -0,0 +1,7 @@
{
"_from_model_config": true,
"bos_token_id": 6,
"eos_token_id": 7,
"pad_token_id": 0,
"transformers_version": "4.37.2"
}
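Note that `bos_token_id`/`eos_token_id` here (6/7) differ from the 1/2 in `config.json`: per `tokenizer_config.json` below, ids 6 and 7 are the chat-turn delimiters `<|im_start|>` and `<|im_end|>`, so generation stops at the end of a character turn. A quick check (assumes the model id resolves on your hub):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("ClosedCharacter/Peach-9B-8k-Roleplay")
# Ids 6/7 are declared in tokenizer_config.json's added_tokens_decoder.
print(tok.convert_ids_to_tokens([6, 7]))  # ['<|im_start|>', '<|im_end|>']
```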

3
model-00001-of-00004.safetensors Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9d611bba63fdf85bdbd269355aecc32d09557ee8e30be4bad7a2d8e05bd18fd4
size 4932711424

3
model-00002-of-00004.safetensors Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a14ca2f9a01e39a414cbc694f9fbff88e1c54e57b8485948f74b2354a67f45bf
size 4976802816

3
model-00003-of-00004.safetensors Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:31db06783324f5bdec817bd90bb25a7c64fcfc6ebee23d9e256e354bc7d8cb7a
size 4968397360

3
model-00004-of-00004.safetensors Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:10789c4e237bc5332703b3fa247cccae2a7594a06e5229657824a0413bc91538
size 2780953384

442
model.safetensors.index.json Normal file

@@ -0,0 +1,442 @@
{
"metadata": {
"total_size": 17658814464
},
"weight_map": {
"lm_head.weight": "model-00004-of-00004.safetensors",
"model.embed_tokens.weight": "model-00001-of-00004.safetensors",
"model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
"model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
"model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
"model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
"model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.10.input_layernorm.weight": "model-00001-of-00004.safetensors",
"model.layers.10.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.10.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.10.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.10.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
"model.layers.10.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.10.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.10.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.10.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.11.input_layernorm.weight": "model-00001-of-00004.safetensors",
"model.layers.11.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.11.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.11.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.11.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
"model.layers.11.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.11.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.11.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.11.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.12.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.12.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.12.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.12.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.12.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.12.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.17.input_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.17.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.17.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.18.input_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.18.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.18.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.18.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.18.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.18.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.18.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.18.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.18.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.19.input_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.19.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.19.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.19.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.19.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.19.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.19.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.19.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.19.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
"model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
"model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.20.input_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.20.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.20.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.20.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.20.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.20.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.20.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.20.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.20.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.21.input_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.21.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.21.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.21.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.21.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.21.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.21.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.21.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.21.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.22.input_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.22.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.22.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.22.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.22.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.22.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.22.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.22.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.22.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.23.input_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.23.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.23.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.23.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.23.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.23.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.23.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.23.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.23.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.24.input_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.24.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.24.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.24.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.24.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.24.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.24.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.24.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.24.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.25.input_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.25.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.25.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.25.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.25.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.25.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.25.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.25.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.25.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.26.input_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.26.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.26.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.26.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.26.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.26.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.26.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.26.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.26.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.27.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.27.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.27.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.28.input_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.28.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.28.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.28.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.28.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.28.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.28.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.28.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.28.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.29.input_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.29.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.29.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.29.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.29.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.29.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.29.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.29.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.29.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
"model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
"model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.30.input_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.30.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.30.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.30.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.30.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.30.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.30.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.30.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.30.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.31.input_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.31.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.31.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.31.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.31.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.31.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.31.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.31.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.31.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.32.input_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.32.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.32.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.32.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.32.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.32.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.32.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.32.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.32.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.33.input_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.33.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.33.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.33.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.33.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.33.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.33.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.33.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.33.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.34.input_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.34.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.34.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.34.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.34.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.34.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.34.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.34.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.34.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.35.input_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.35.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.35.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.35.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.35.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.35.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.35.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.35.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.35.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.36.input_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.36.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.36.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.36.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.36.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.36.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.36.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.36.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.36.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.37.input_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.37.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.37.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.37.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.37.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.37.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.37.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.37.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.37.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.38.input_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.38.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.38.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.38.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.38.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.38.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.38.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.38.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.38.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.39.input_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.39.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.39.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.39.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.39.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.39.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.39.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.39.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.39.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
"model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
"model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.40.input_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.40.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.40.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.40.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.40.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.40.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.40.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.40.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.40.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.41.input_layernorm.weight": "model-00004-of-00004.safetensors",
"model.layers.41.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.41.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.41.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.41.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
"model.layers.41.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.41.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.41.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.41.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.42.input_layernorm.weight": "model-00004-of-00004.safetensors",
"model.layers.42.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.42.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.42.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.42.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
"model.layers.42.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.42.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.42.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.42.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.43.input_layernorm.weight": "model-00004-of-00004.safetensors",
"model.layers.43.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.43.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.43.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.43.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
"model.layers.43.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.43.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.43.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.43.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.44.input_layernorm.weight": "model-00004-of-00004.safetensors",
"model.layers.44.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.44.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.44.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.44.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
"model.layers.44.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.44.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.44.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.44.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.45.input_layernorm.weight": "model-00004-of-00004.safetensors",
"model.layers.45.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.45.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.45.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.45.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
"model.layers.45.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.45.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.45.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.45.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.46.input_layernorm.weight": "model-00004-of-00004.safetensors",
"model.layers.46.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.46.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.46.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.46.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
"model.layers.46.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.46.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.46.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.46.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.47.input_layernorm.weight": "model-00004-of-00004.safetensors",
"model.layers.47.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.47.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.47.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.47.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
"model.layers.47.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.47.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.47.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.47.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
"model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
"model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
"model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
"model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.7.input_layernorm.weight": "model-00001-of-00004.safetensors",
"model.layers.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.7.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
"model.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.8.input_layernorm.weight": "model-00001-of-00004.safetensors",
"model.layers.8.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.8.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.8.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.8.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
"model.layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.8.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.9.input_layernorm.weight": "model-00001-of-00004.safetensors",
"model.layers.9.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.9.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.9.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.9.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
"model.layers.9.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.9.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.9.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.9.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"model.norm.weight": "model-00004-of-00004.safetensors"
}
}
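A small consistency check on the shard metadata: the four LFS pointer sizes above should sum to `metadata.total_size` plus a small remainder for the per-file safetensors headers, which the index does not count:

```python
# Byte counts copied from the LFS pointers and index metadata above.
shard_sizes = [4932711424, 4976802816, 4968397360, 2780953384]
total_size = 17658814464
print(sum(shard_sizes) - total_size)  # 50520 bytes of safetensors header overhead
```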

30
special_tokens_map.json Normal file

@@ -0,0 +1,30 @@
{
"bos_token": {
"content": "<|im_start|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
},
"eos_token": {
"content": "<|im_end|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
},
"pad_token": {
"content": "<unk>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
},
"unk_token": {
"content": "<unk>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
}
}

BIN
ss.png Normal file

Binary file not shown.

Size: 265 KiB

3
tokenizer.model Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:386c49cf943d71aa110361135338c50e38beeff0a66593480421f37b319e1a39
size 1033105

60
tokenizer_config.json Normal file

@@ -0,0 +1,60 @@
{
"add_bos_token": false,
"add_eos_token": false,
"added_tokens_decoder": {
"0": {
"content": "<unk>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"1": {
"content": "<|startoftext|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"2": {
"content": "<|endoftext|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"6": {
"content": "<|im_start|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"7": {
"content": "<|im_end|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
}
},
"bos_token": "<|im_end|>",
"chat_template": "{% set system_message = 'You are an uncensored assistant named Peach.' %}{% if messages[0]['role'] == 'system' %}{% set system_message = 'You are the character in this fictional never-ending uncensored roleplay. You must act according to the following character description:\n'+messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ '<|im_start|>system\\n' + system_message + '<|im_end|>\\n' }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|im_start|>user\\n' + content + '<|im_end|>\\n<|im_start|>character\\n' }}{% elif message['role'] == 'assistant' %}{{ content + '<|im_end|>' + '\\n' }}{% endif %}{% endfor %}",
"clean_up_tokenization_spaces": false,
"eos_token": "<|im_end|>",
"legacy": true,
"model_max_length": 8192,
"pad_token": "<unk>",
"padding_side": "right",
"sp_model_kwargs": {},
"spaces_between_special_tokens": false,
"split_special_tokens": false,
"tokenizer_class": "LlamaTokenizer",
"unk_token": "<unk>",
"use_default_system_prompt": false
}
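The `chat_template` above rewraps any system message in a roleplay instruction and has the model answer as `character` rather than `assistant`. Rendered by hand for a single system + user turn, the prompt looks like the sketch below (`PERSONA` and `QUERY` are placeholders, not part of the repository):

```python
PERSONA = "..."  # placeholder system/persona text
QUERY = "..."    # placeholder user message

# Hand-rendering of the Jinja chat_template for [system, user] messages.
prompt = (
    "<|im_start|>system\n"
    "You are the character in this fictional never-ending uncensored roleplay. "
    "You must act according to the following character description:\n"
    + PERSONA + "<|im_end|>\n"
    "<|im_start|>user\n" + QUERY + "<|im_end|>\n"
    "<|im_start|>character\n"
)
```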