Initialize project; model provided by the ModelHub XC community

Model: Naphula/Goetia-8B-v1
Source: Original Platform
Author: ModelHub XC
Date: 2026-05-05 06:57:24 +08:00
Commit: e929935fb8
14 changed files with 3152 additions and 0 deletions

.gitattributes vendored Normal file (36 lines)

@@ -0,0 +1,36 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
tokenizer.json filter=lfs diff=lfs merge=lfs -text

8dare_B.png Normal file (binary image, 31 KiB; not shown)

README.md Normal file (393 lines)

@@ -0,0 +1,393 @@
---
base_model:
- Babsie/ThetaBlackGorgon-8B
- Bacon666/Athlon-8B-0.1
- Naphula/Llamatron-8B-v1
- DarkArtsForge/Raven-8B-v1
- EldritchLabs/Cthulhu-8B-v1.4
- HumanLLMs/Human-Like-LLama3-8B-Instruct
- NeverSleep/Llama-3-Lumimaid-8B-v0.1-OAS
- OccultAI/Morpheus-8B-v3
- Sao10K/L3-8B-Stheno-v3.2
- SicariusSicariiStuff/Assistant_Pepe_8B
- SicariusSicariiStuff/Impish_Mind_8B
- TheDrummer/Anubis-Mini-8B-v1
- TroyDoesAI/BlackSheep-X-Dolphin
datasets:
- DarkArtsForge/Poe_v1
- EldritchLabs/Cthulhu_v1.4b
- OccultAI/illuminati_imatrix_v1
- OccultAI/Morpheus_1052
- SicariusSicariiStuff/UBW_Tapestries
language:
- en
library_name: transformers
license: apache-2.0
tags:
- creative
- creative writing
- fiction writing
- plot generation
- sub-plot generation
- story generation
- scene continue
- storytelling
- fiction story
- science fiction
- romance
- all genres
- story
- writing
- vivid prosing
- vivid writing
- fiction
- roleplaying
- float32
- swearing
- rp
- horror
- dare_linear
- llama
- merge
- mergekit
widget:
- text: "Goetia-8B-v1"
output:
url: https://cdn-uploads.huggingface.co/production/uploads/68e840caa318194c44ec2a04/DHbuh4efzjCGpxDUciZ_-.jpeg
---
<audio controls src="https://cdn-uploads.huggingface.co/production/uploads/68e840caa318194c44ec2a04/KcjosKamYjk9Qm18mAGQW.mpga"></audio>
> [!CAUTION]
> <span style="color:red; font-weight:bold">⚠️ Warning:</span> This model can produce narratives and RP that contain violent and graphic erotic content. Adjust your system prompt accordingly, and use the **Llama 3** chat template.
>
<!DOCTYPE html>
<style>
@import url('https://fonts.googleapis.com/css2?family=Crimson+Text:ital,wght@0,400;0,700;1,400&family=Uncial+Antiqua&display=swap');
body {
font-family: 'Crimson Text', serif;
color: #4A3F35; /* Faded Ink Brown */
line-height: 1.6;
margin: 0;
padding: 0;
background-color: #281E18; /* Dark Wood Desk */
}
b, strong {
color: #8C1C13; /* Crimson Seal */
}
.grimoire-text {
font-family: 'Uncial Antiqua', cursive;
color: #3D352A; /* Dark Ink */
position: relative;
z-index: 2;
margin-left: 0.2em;
text-shadow: 0 0 10px #A49687;
}
/* Section styling */
.section-container {
background-color: rgba(245, 235, 218, 0.05);
margin-bottom: 30px;
position: relative;
overflow: hidden;
border-bottom: 1px solid rgba(140, 28, 19, 0.3); /* Faded Crimson Border */
box-shadow: 0 4px 15px rgba(0, 0, 0, 0.2);
}
.section-header {
display: flex;
align-items: center;
background-color: rgba(140, 28, 19, 0.06);
padding: 10px 20px;
}
.section-indicator {
width: 8px;
height: 20px;
background-color: #8C1C13;
margin-right: 15px;
box-shadow: 0 0 8px rgba(140, 28, 19, 0.4);
}
.section-title {
font-family: 'Uncial Antiqua', cursive;
color: #4A3F35;
font-size: 1.4rem;
margin: 0;
letter-spacing: 1px;
font-weight: 400;
text-transform: capitalize;
}
.section-content {
padding: 20px;
font-family: 'Crimson Text', serif;
color: #4A3F35;
line-height: 1.7;
}
/* Title styling */
.title-container {
background-color: transparent;
position: relative;
overflow: hidden;
margin-bottom: 40px;
border-left: 3px solid #8C1C13;
box-shadow: 0 6px 20px rgba(0, 0, 0, 0.2);
}
.title-wrapper {
position: relative;
z-index: 2;
padding: 25px 20px 30px 30px;
}
.title-main {
color: #3D352A;
font-size: 1.8rem;
font-weight: 700;
margin: 0;
letter-spacing: 2px;
display: inline-block;
position: relative;
text-transform: uppercase;
}
/* Subheading styling */
.subheading {
font-family: 'Uncial Antiqua', cursive;
color: #7B5E4A; /* Lighter Ink Brown */
font-size: 1.1rem;
margin-top: 20px;
margin-bottom: 15px;
font-weight: 400;
border-bottom: 1px dashed rgba(123, 94, 74, 0.4);
display: inline-block;
text-transform: uppercase;
letter-spacing: 1px;
}
/* Links */
a {
color: #7B5E4A;
text-decoration: none;
font-weight: bold;
}
a:hover {
text-decoration: underline;
color: #8C1C13; /* Crimson Seal on hover */
}
/* List Styling - Goetia Icons */
ul {
list-style: none; /* CRITICAL: This hides the default marker causing the blue box */
padding-left: 1.6em;
}
ul li {
position: relative;
margin-bottom: 0.6em;
}
ul li::before {
content: "\26E7"; /* ⛧ Inverted Pentagram */
/* FALLBACK PROTECTION: */
/* This ensures that if your main font lacks the symbol, it grabs it from the system/OS symbols */
font-family: "Segoe UI Symbol", "Apple Symbols", "Noto Sans Symbols", "DejaVu Sans", sans-serif;
position: absolute;
left: -1.4em;
/* Fiery Styling */
color: #CC0000; /* Strong red base */
font-size: 1.5rem; /* Larger */
font-weight: normal; /* Icons often render cleaner without 'bold' at this size */
top: -5px; /* Vertical alignment fix */
/* Static Fire Glow (No Animation/Low CPU cost) */
/* Layer 1: Slight dark drop shadow for depth */
/* Layer 2: Orange-red ambient glow */
text-shadow: 1px 1px 1px rgba(0,0,0,0.4), 0 0 5px #FF5500;
background-color: transparent;
}
/* Container */
.container {
max-width: 1200px;
margin: 20px auto;
padding: 40px 20px;
background-color: #F5EBDA; /* Aged Parchment */
background-image:
radial-gradient(circle at 10% 10%, rgba(140, 28, 19, 0.04) 0%, transparent 50%),
radial-gradient(circle at 90% 80%, rgba(61, 53, 42, 0.06) 0%, transparent 50%);
min-height: calc(100vh - 40px);
border: 1px solid #C8BBAA; /* Parchment Edge */
border-radius: 3px;
box-shadow: 0 8px 32px rgba(0, 0, 0, 0.3);
}
</style>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Goetia 8B v1</title>
</head>
<body>
<div class="container">
<div class="title-container">
<div class="title-wrapper">
<h1 class="title-main">
<span class="grimoire-text">📜 Goetia 8B v1</span>
</h1>
</div>
</div>
<img src="https://cdn-uploads.huggingface.co/production/uploads/68e840caa318194c44ec2a04/DHbuh4efzjCGpxDUciZ_-.jpeg" alt="Goetia Grimoire" style="width:100%; border-radius: 3px; margin-bottom: 30px; border: 1px solid #C8BBAA;">
<div class="section-container">
<div class="section-header">
<div class="section-indicator"></div>
<h2 class="section-title">🐙 The Lesser Key</h2>
</div>
<div class="section-content"><font face="verdana">
This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit).
This model was merged using the [Linear DARE](https://arxiv.org/abs/2311.03099) merge method, with aifeifei798/DarkIdol-Llama-3.1-8B-Instruct-1.2-Uncensored as the base.
<b>Goetia 8B v1</b> is fully uncensored; no ablation or jailbreaks are needed. The model is very creative for its size, and has quite an attitude. The <a href="https://cdn-uploads.huggingface.co/production/uploads/68e840caa318194c44ec2a04/O5NIDmxgDX-jt20gHMPVe.png">merge audit</a> shows the exact weight distribution.
This model is recommended for those who can't run the <a href="https://huggingface.co/collections/OccultAI/goetia">larger Goetias</a>, or who want a change of pace from the way Mistral writes. Raising Temp and TopNSigma to 1.0 also seems to help.
`dare_linear` seems quite effective for Llama models in particular. It outperformed `della_linear` and `dare_ties` in my testing.
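For intuition, the core of a DARE-linear merge looks roughly like the sketch below, following the drop-and-rescale procedure from the DARE paper: each donor's task vector is randomly sparsified at the configured `density`, rescaled to preserve its expected value, then accumulated onto the base with its `weight`. This is an illustrative approximation, not mergekit's actual code:
```python
# Illustrative sketch of DARE-linear (arXiv:2311.03099); not mergekit's implementation.
import torch

def dare_linear(base, finetuned, weights, density=0.8, lam=1.0):
    merged = base.clone()
    for ft, w in zip(finetuned, weights):
        delta = ft - base                        # task vector vs. the base model
        keep = torch.rand_like(delta) < density  # drop each param with prob 1 - density
        delta = delta * keep / density           # rescale survivors to preserve E[delta]
        merged += lam * w * delta                # weighted linear accumulation
    return merged
```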
The following models were included in the merge:
- aifeifei798/DarkIdol-Llama-3.1-8B-Instruct-1.2-Uncensored
- Babsie/ThetaBlackGorgon-8B
- Bacon666/Athlon-8B-0.1
- Naphula/Llamatron-8B-v1
- DarkArtsForge/Raven-8B-v1
- EldritchLabs/Cthulhu-8B-v1.4
- HumanLLMs/Human-Like-LLama3-8B-Instruct
- NeverSleep/Llama-3-Lumimaid-8B-v0.1-OAS
- OccultAI/Morpheus-8B-v3
- Sao10K/L3-8B-Stheno-v3.2
- SicariusSicariiStuff/Assistant_Pepe_8B
- SicariusSicariiStuff/Impish_Mind_8B
- TheDrummer/Anubis-Mini-8B-v1
- TroyDoesAI/BlackSheep-X-Dolphin
</font></div>
</div>
<div class="section-container">
<div class="section-header">
<div class="section-indicator"></div>
<h2 class="section-title">🧙 OccultAI Sigil Magic</h2>
</div>
<div class="section-content"><font face="verdana">
<img src="https://cdn-uploads.huggingface.co/production/uploads/68e840caa318194c44ec2a04/Qv78rwAtpMmaFTpCcELjQ.jpeg">
### Configuration
The following YAML configuration was used to produce this model:
```yaml
architecture: LlamaForCausalLM
models:
- model: A:\LLM\.cache\8B\!models--aifeifei798--DarkIdol-Llama-3.1-8B-Instruct-1.2-Uncensored
- model: A:\LLM\.cache\8B\!models--SicariusSicariiStuff--Assistant_Pepe_8B
parameters:
weight: 0.4
density: 0.8
- model: B:\8B\Morpheus_v3_prototype_526
parameters:
weight: 0.4
density: 0.8
- model: A:\LLM\.cache\8B\Naphula_Llamatron-8B-v1
parameters:
weight: 0.25
density: 0.8
- model: A:\LLM\.cache\8B\!models--SicariusSicariiStuff--Impish_Mind_8B
parameters:
weight: 0.1
density: 0.8
- model: A:\LLM\.cache\8B\!models--Sao10K--L3-8B-Stheno-v3.2
parameters:
weight: 0.1
density: 0.8
- model: B:\8B\Cthulhu_v1.4
parameters:
weight: 0.4
density: 0.8
- model: B:\8B\models--TheDrummer--Anubis-Mini-8B-v1
parameters:
weight: 0.2
density: 0.8
- model: B:\8B\Raven_v1
parameters:
weight: 0.4
density: 0.8
- model: B:\8B\models--HumanLLMs--Human-Like-LLama3-8B-Instruct
parameters:
weight: 0.1
density: 0.8
- model: A:\LLM\.cache\8B\!models--NeverSleep--Llama-3-Lumimaid-8B-v0.1-OAS
parameters:
weight: 0.1
density: 0.8
- model: A:\LLM\.cache\8B\!models--TroyDoesAI--BlackSheep-X-Dolphin
parameters:
weight: 0.1
density: 0.8
- model: A:\LLM\.cache\8B\!models--Bacon666--Athlon-8B-0.1
parameters:
weight: 0.1
density: 0.8
- model: A:\LLM\.cache\8B\!models--Babsie--ThetaBlackGorgon-8B
parameters:
weight: 0.1
density: 0.8
merge_method: dare_linear
base_model: A:\LLM\.cache\8B\!models--aifeifei798--DarkIdol-Llama-3.1-8B-Instruct-1.2-Uncensored
parameters:
lambda: 1.0
normalize: false
int8_mask: false
rescale: true
tokenizer:
source: union
chat_template: auto
dtype: float32
out_dtype: bfloat16
name: 📜 Goetia-8B-v1
```
</font></div>
</div>
<img src="https://cdn-uploads.huggingface.co/production/uploads/68e840caa318194c44ec2a04/O5NIDmxgDX-jt20gHMPVe.png">
<img src="https://cdn-uploads.huggingface.co/production/uploads/68e840caa318194c44ec2a04/FOij6TMejipIERSHPtKQg.jpeg">
<div class="section-container">
<div class="section-header">
<div class="section-indicator"></div>
<h2 class="section-title">🕯️ Summon the Infernal — Invocation Ritual</h2>
</div>
<div class="section-content"><font face="verdana">
Experiment with these settings:
<a href="https://huggingface.co/datasets/Naphula/Updated_Settings">Naphula/Updated_Settings</a>
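As a starting point, here is a minimal generation sketch using the standard transformers API; the model ID and the Temp 1.0 suggestion come from this card, while the prompt and token budget are placeholders:
```python
# Minimal usage sketch (standard transformers API); prompt and token budget
# are placeholders, not an official recipe.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "Naphula/Goetia-8B-v1"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.bfloat16, device_map="auto"
)

messages = [
    {"role": "system", "content": "You are a vivid fiction co-writer."},
    {"role": "user", "content": "Open a gothic horror scene in a drowned chapel."},
]
# The bundled chat template is Llama 3 style, per the warning at the top.
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

# Temp 1.0 per the card; TopNSigma is a sampler offered by some backends
# (e.g. llama.cpp) rather than a standard transformers generate() argument.
output = model.generate(input_ids, max_new_tokens=300, do_sample=True, temperature=1.0)
print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))
```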
</font></div>
</div>
</div>
</body>
</html>

chat_template.jinja Normal file (109 lines)

@@ -0,0 +1,109 @@
{{- bos_token }}
{%- if custom_tools is defined %}
{%- set tools = custom_tools %}
{%- endif %}
{%- if not tools_in_user_message is defined %}
{%- set tools_in_user_message = true %}
{%- endif %}
{%- if not date_string is defined %}
{%- set date_string = "26 Jul 2024" %}
{%- endif %}
{%- if not tools is defined %}
{%- set tools = none %}
{%- endif %}
{#- This block extracts the system message, so we can slot it into the right place. #}
{%- if messages[0]['role'] == 'system' %}
{%- set system_message = messages[0]['content']|trim %}
{%- set messages = messages[1:] %}
{%- else %}
{%- set system_message = "" %}
{%- endif %}
{#- System message + builtin tools #}
{{- "<|start_header_id|>system<|end_header_id|>\n\n" }}
{%- if builtin_tools is defined or tools is not none %}
{{- "Environment: ipython\n" }}
{%- endif %}
{%- if builtin_tools is defined %}
{{- "Tools: " + builtin_tools | reject('equalto', 'code_interpreter') | join(", ") + "\n\n"}}
{%- endif %}
{{- "Cutting Knowledge Date: December 2023\n" }}
{{- "Today Date: " + date_string + "\n\n" }}
{%- if tools is not none and not tools_in_user_message %}
{{- "You have access to the following functions. To call a function, please respond with JSON for a function call." }}
{{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' }}
{{- "Do not use variables.\n\n" }}
{%- for t in tools %}
{{- t | tojson(indent=4) }}
{{- "\n\n" }}
{%- endfor %}
{%- endif %}
{{- system_message }}
{{- "<|eot_id|>" }}
{#- Custom tools are passed in a user message with some extra guidance #}
{%- if tools_in_user_message and not tools is none %}
{#- Extract the first user message so we can plug it in here #}
{%- if messages | length != 0 %}
{%- set first_user_message = messages[0]['content']|trim %}
{%- set messages = messages[1:] %}
{%- else %}
{{- raise_exception("Cannot put tools in the first user message when there's no first user message!") }}
{%- endif %}
{{- '<|start_header_id|>user<|end_header_id|>\n\n' -}}
{{- "Given the following functions, please respond with a JSON for a function call " }}
{{- "with its proper arguments that best answers the given prompt.\n\n" }}
{{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' }}
{{- "Do not use variables.\n\n" }}
{%- for t in tools %}
{{- t | tojson(indent=4) }}
{{- "\n\n" }}
{%- endfor %}
{{- first_user_message + "<|eot_id|>"}}
{%- endif %}
{%- for message in messages %}
{%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}
{{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' }}
{%- elif 'tool_calls' in message %}
{%- if not message.tool_calls|length == 1 %}
{{- raise_exception("This model only supports single tool-calls at once!") }}
{%- endif %}
{%- set tool_call = message.tool_calls[0].function %}
{%- if builtin_tools is defined and tool_call.name in builtin_tools %}
{{- '<|start_header_id|>assistant<|end_header_id|>\n\n' -}}
{{- "<|python_tag|>" + tool_call.name + ".call(" }}
{%- for arg_name, arg_val in tool_call.arguments | items %}
{{- arg_name + '="' + arg_val + '"' }}
{%- if not loop.last %}
{{- ", " }}
{%- endif %}
{%- endfor %}
{{- ")" }}
{%- else %}
{{- '<|start_header_id|>assistant<|end_header_id|>\n\n' -}}
{{- '{"name": "' + tool_call.name + '", ' }}
{{- '"parameters": ' }}
{{- tool_call.arguments | tojson }}
{{- "}" }}
{%- endif %}
{%- if builtin_tools is defined %}
{#- This means we're in ipython mode #}
{{- "<|eom_id|>" }}
{%- else %}
{{- "<|eot_id|>" }}
{%- endif %}
{%- elif message.role == "tool" or message.role == "ipython" %}
{{- "<|start_header_id|>ipython<|end_header_id|>\n\n" }}
{%- if message.content is mapping or message.content is iterable %}
{{- message.content | tojson }}
{%- else %}
{{- message.content }}
{%- endif %}
{{- "<|eot_id|>" }}
{%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|start_header_id|>assistant<|end_header_id|>\n\n' }}
{%- endif %}
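This is the standard Llama 3.1 chat template, including the built-in and custom tool-calling branches. Those branches can be exercised directly through transformers' chat-template API, as in the sketch below; `get_weather` and the messages are hypothetical placeholders:
```python
# Sketch: render this template's tool path via transformers' apply_chat_template.
# get_weather and the messages are hypothetical placeholders.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Naphula/Goetia-8B-v1")

def get_weather(city: str):
    """Get the current weather for a city.

    Args:
        city: Name of the city.
    """

messages = [{"role": "user", "content": "What's the weather in Prague?"}]
prompt = tokenizer.apply_chat_template(
    messages, tools=[get_weather], add_generation_prompt=True, tokenize=False
)
print(prompt)  # the tool JSON lands in the first user turn (tools_in_user_message)
```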

config.json Normal file (39 lines)

@@ -0,0 +1,39 @@
{
"architectures": [
"LlamaForCausalLM"
],
"attention_bias": false,
"attention_dropout": 0.0,
"bos_token_id": 128000,
"dtype": "bfloat16",
"eos_token_id": [
128001,
128008,
128009
],
"head_dim": 128,
"hidden_act": "silu",
"hidden_size": 4096,
"initializer_range": 0.02,
"intermediate_size": 14336,
"max_position_embeddings": 131072,
"mlp_bias": false,
"model_type": "llama",
"num_attention_heads": 32,
"num_hidden_layers": 32,
"num_key_value_heads": 8,
"pretraining_tp": 1,
"rms_norm_eps": 1e-05,
"rope_scaling": {
"factor": 8.0,
"high_freq_factor": 4.0,
"low_freq_factor": 1.0,
"original_max_position_embeddings": 8192,
"rope_type": "llama3"
},
"rope_theta": 500000.0,
"tie_word_embeddings": false,
"transformers_version": "4.57.6",
"use_cache": true,
"vocab_size": 128269
}
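The config retains Llama 3.1's `rope_scaling` block, so the merged model advertises the full 131072-token context window. This can be confirmed from the published config with the standard transformers API:
```python
# Quick sanity check of the published config (standard transformers API).
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("Naphula/Goetia-8B-v1")
print(cfg.max_position_embeddings)    # 131072
print(cfg.rope_scaling["rope_type"])  # "llama3"
```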

mergekit_config.yml Normal file (68 lines)

@@ -0,0 +1,68 @@
architecture: LlamaForCausalLM
models:
- model: A:\LLM\.cache\8B\!models--aifeifei798--DarkIdol-Llama-3.1-8B-Instruct-1.2-Uncensored
- model: A:\LLM\.cache\8B\!models--SicariusSicariiStuff--Assistant_Pepe_8B
parameters:
weight: 0.4
density: 0.8
- model: B:\8B\Morpheus_v3_prototype_526
parameters:
weight: 0.4
density: 0.8
- model: A:\LLM\.cache\8B\Naphula_Llamatron-8B-v1
parameters:
weight: 0.25
density: 0.8
- model: A:\LLM\.cache\8B\!models--SicariusSicariiStuff--Impish_Mind_8B
parameters:
weight: 0.1
density: 0.8
- model: A:\LLM\.cache\8B\!models--Sao10K--L3-8B-Stheno-v3.2
parameters:
weight: 0.1
density: 0.8
- model: B:\8B\Cthulhu_v1.4
parameters:
weight: 0.4
density: 0.8
- model: B:\8B\models--TheDrummer--Anubis-Mini-8B-v1
parameters:
weight: 0.2
density: 0.8
- model: B:\8B\Raven_v1
parameters:
weight: 0.4
density: 0.8
- model: B:\8B\models--HumanLLMs--Human-Like-LLama3-8B-Instruct
parameters:
weight: 0.1
density: 0.8
- model: A:\LLM\.cache\8B\!models--NeverSleep--Llama-3-Lumimaid-8B-v0.1-OAS
parameters:
weight: 0.1
density: 0.8
- model: A:\LLM\.cache\8B\!models--TroyDoesAI--BlackSheep-X-Dolphin
parameters:
weight: 0.1
density: 0.8
- model: A:\LLM\.cache\8B\!models--Bacon666--Athlon-8B-0.1
parameters:
weight: 0.1
density: 0.8
- model: A:\LLM\.cache\8B\!models--Babsie--ThetaBlackGorgon-8B
parameters:
weight: 0.1
density: 0.8
merge_method: dare_linear
base_model: A:\LLM\.cache\8B\!models--aifeifei798--DarkIdol-Llama-3.1-8B-Instruct-1.2-Uncensored
parameters:
lambda: 1.0
normalize: false
int8_mask: false
rescale: true
tokenizer:
source: union
chat_template: auto
dtype: float32
out_dtype: bfloat16
name: 📜 Goetia-8B-v1
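This is the same configuration embedded in the README. To re-run the merge, mergekit's documented Python entry point can be used as sketched below; the Windows paths inside the YAML must point to local model copies, and the output directory is a placeholder:
```python
# Sketch of re-running the merge via mergekit's Python API, per mergekit's docs.
# The A:\ and B:\ paths inside the YAML must exist on your machine.
import yaml
from mergekit.config import MergeConfiguration
from mergekit.merge import MergeOptions, run_merge

with open("mergekit_config.yml", encoding="utf-8") as f:
    merge_config = MergeConfiguration.model_validate(yaml.safe_load(f))

run_merge(
    merge_config,
    "./Goetia-8B-v1",                    # placeholder output directory
    options=MergeOptions(copy_tokenizer=True),
)
```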

model-00001-of-00004.safetensors (LFS pointer)

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f3616fd9ab9c237c2dea39f251c2d93be4cff355ff65c0df62ced1b9fe271f15
size 4953799376

model-00002-of-00004.safetensors (LFS pointer)

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3c57f3d59754a2214ddefab9416898e73380fc17c9f964a63eed6d81148c441d
size 4999819336

model-00003-of-00004.safetensors (LFS pointer)

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e38e44f439b6dc43912ecc582ed64a9368c3e61aa665ada796d89ea3e9b63500
size 4915916144

model-00004-of-00004.safetensors (LFS pointer)

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e4aa483b876dcc8f92048b71fa98f3c3f5e64f327cb67545d181eecfd1ae86a0
size 1191234472

model.safetensors.index.json (299 lines)

@@ -0,0 +1,299 @@
{
"metadata": {
"total_size": 16060735488,
"mergekit_version": "0.1.4"
},
"weight_map": {
"lm_head.weight": "model-00001-of-00004.safetensors",
"model.embed_tokens.weight": "model-00001-of-00004.safetensors",
"model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
"model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
"model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
"model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
"model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.10.input_layernorm.weight": "model-00001-of-00004.safetensors",
"model.layers.10.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.10.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.10.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.10.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
"model.layers.10.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.10.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.10.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.10.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.11.input_layernorm.weight": "model-00001-of-00004.safetensors",
"model.layers.11.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.11.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.11.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.11.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
"model.layers.11.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.11.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.11.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.11.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.12.input_layernorm.weight": "model-00001-of-00004.safetensors",
"model.layers.12.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.12.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.12.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.12.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
"model.layers.12.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.12.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.12.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.12.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.13.input_layernorm.weight": "model-00001-of-00004.safetensors",
"model.layers.13.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.13.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.13.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.13.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
"model.layers.13.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.13.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.13.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.13.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.14.input_layernorm.weight": "model-00001-of-00004.safetensors",
"model.layers.14.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.14.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
"model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.17.input_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.17.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.17.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.18.input_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.18.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.18.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.18.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.18.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.18.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.18.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.18.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.18.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.19.input_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.19.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.19.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.19.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.19.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.19.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.19.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.19.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.19.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.2.input_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.2.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.2.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.2.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.2.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.2.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.2.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.2.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.2.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.20.input_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.20.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.20.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.20.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.20.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.20.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.20.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.20.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.20.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.21.input_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.21.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.21.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.21.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.21.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.21.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.21.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.21.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.21.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.22.input_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.22.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.22.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.22.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.22.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.22.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.22.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.22.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.22.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.23.input_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.23.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.23.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.23.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.23.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.23.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.23.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.23.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.23.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.24.input_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.24.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.24.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.24.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.24.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.24.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.24.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.24.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.24.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
"model.layers.25.input_layernorm.weight": "model-00002-of-00004.safetensors",
"model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.28.input_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.28.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.28.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.28.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.28.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.28.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.28.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.28.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.28.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.29.input_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.29.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.29.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.29.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.29.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.29.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.29.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.29.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.29.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.3.input_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.3.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.3.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.3.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.3.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.3.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.3.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.3.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.3.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.30.input_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.30.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.30.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.30.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.30.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.30.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.30.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.30.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.30.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.31.input_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.31.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.31.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.31.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.31.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.31.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.31.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.31.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.31.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.4.input_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.4.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.4.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.4.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.4.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.4.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.4.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.4.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.4.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.5.input_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.5.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.5.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.5.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.5.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.5.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.5.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.5.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.5.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.6.input_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.6.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.6.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.6.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.6.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.6.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.6.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.6.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.6.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.7.input_layernorm.weight": "model-00003-of-00004.safetensors",
"model.layers.7.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
"model.layers.7.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.7.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.7.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
"model.layers.7.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.7.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.7.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.7.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.8.input_layernorm.weight": "model-00004-of-00004.safetensors",
"model.layers.8.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.8.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.8.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.8.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
"model.layers.8.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.8.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.8.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.8.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.9.input_layernorm.weight": "model-00004-of-00004.safetensors",
"model.layers.9.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.9.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.9.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.9.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
"model.layers.9.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.9.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.9.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
"model.layers.9.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
"model.norm.weight": "model-00004-of-00004.safetensors"
}
}

special_tokens_map.json Normal file (23 lines)

@@ -0,0 +1,23 @@
{
"bos_token": {
"content": "<|begin_of_text|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
},
"eos_token": {
"content": "<|eot_id|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
},
"pad_token": {
"content": "<|eot_id|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
}
}

tokenizer.json Normal file (3 lines, LFS pointer)

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:02e0633925aa2c0e7a2c54cd744e1d5455dee748e2fe1935703bfcab04ce9d99
size 17212434

tokenizer_config.json Normal file (2170 lines)
File diff suppressed because it is too large.