Initialize the project; model provided by the ModelHub XC community
Model: LLM-Research/Codestral-22B-v0.1 Source: Original Platform
35
.gitattributes
vendored
Normal file
@@ -0,0 +1,35 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
||||
194
README.md
Normal file
@@ -0,0 +1,194 @@
---
library_name: vllm
language:
- code
license: other
tags:
- code
- mistral-common
inference: false
license_name: mnpl
license_link: https://mistral.ai/licences/MNPL-0.1.md
extra_gated_description: If you want to learn more about how we process your personal data, please read our <a href="https://mistral.ai/terms/">Privacy Policy</a>.
---

# Model Card for Codestral-22B-v0.1

## Encode and Decode with `mistral_common`

```py
from mistral_common.tokens.tokenizers.mistral import MistralTokenizer
from mistral_common.protocol.instruct.messages import UserMessage
from mistral_common.protocol.instruct.request import ChatCompletionRequest

mistral_models_path = "MISTRAL_MODELS_PATH"

tokenizer = MistralTokenizer.v3()

completion_request = ChatCompletionRequest(messages=[UserMessage(content="Explain Machine Learning to me in a nutshell.")])

# Token ids for the instruct prompt; decoding is shown in the inference sections below.
tokens = tokenizer.encode_chat_completion(completion_request).tokens
```

## Inference with `mistral_inference`

```py
from mistral_inference.transformer import Transformer
from mistral_inference.generate import generate

# Reuses `mistral_models_path`, `tokens`, and `tokenizer` from the snippet above.
model = Transformer.from_folder(mistral_models_path)
out_tokens, _ = generate([tokens], model, max_tokens=64, temperature=0.0, eos_id=tokenizer.instruct_tokenizer.tokenizer.eos_id)

result = tokenizer.decode(out_tokens[0])

print(result)
```

## Inference with Hugging Face `transformers`

```py
import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("mistralai/Codestral-22B-v0.1")
model.to("cuda")

# `tokens` comes from the mistral_common encoding above; generate expects a batched tensor
input_ids = torch.tensor([tokens]).to("cuda")
generated_ids = model.generate(input_ids, max_new_tokens=1000, do_sample=True)

# decode with mistral tokenizer
result = tokenizer.decode(generated_ids[0].tolist())
print(result)
```

> [!TIP]
> PRs to correct the `transformers` tokenizer so that it gives 1-to-1 the same results as the `mistral_common` reference implementation are very welcome!
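
One way to spot such discrepancies is to tokenize the same single-turn conversation with both implementations and compare the resulting ids. A minimal sketch, assuming the repository's `tokenizer_config.json` ships a chat template usable via `apply_chat_template` (not guaranteed by this card):

```py
from mistral_common.tokens.tokenizers.mistral import MistralTokenizer
from mistral_common.protocol.instruct.messages import UserMessage
from mistral_common.protocol.instruct.request import ChatCompletionRequest
from transformers import AutoTokenizer

prompt = "Explain Machine Learning to me in a nutshell."

# Reference tokenization via mistral_common
ref_tokens = MistralTokenizer.v3().encode_chat_completion(
    ChatCompletionRequest(messages=[UserMessage(content=prompt)])
).tokens

# transformers tokenization of the same conversation
hf_tokenizer = AutoTokenizer.from_pretrained("mistralai/Codestral-22B-v0.1")
hf_tokens = hf_tokenizer.apply_chat_template(
    [{"role": "user", "content": prompt}], tokenize=True
)

# Any difference printed here is exactly the gap the tip above refers to.
print(ref_tokens == hf_tokens)
```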

---

Codestral-22B-v0.1 is trained on a diverse dataset of 80+ programming languages, including the most popular ones, such as Python, Java, C, C++, JavaScript, and Bash (more details in the [Blogpost](https://mistral.ai/news/codestral/)). The model can be queried:
- As an instruct model, for instance to answer questions about a code snippet (write documentation, explain, refactor) or to generate code following specific instructions
- For Fill-in-the-Middle (FIM), to predict the middle tokens between a prefix and a suffix (very useful for software-development add-ons such as those in VS Code)

## Installation

It is recommended to use `mistralai/Codestral-22B-v0.1` with [mistral-inference](https://github.com/mistralai/mistral-inference).

```
pip install mistral_inference
```

## Download

```py
from huggingface_hub import snapshot_download
from pathlib import Path

mistral_models_path = Path.home().joinpath('mistral_models', 'Codestral-22B-v0.1')
mistral_models_path.mkdir(parents=True, exist_ok=True)

snapshot_download(repo_id="mistralai/Codestral-22B-v0.1", allow_patterns=["params.json", "consolidated.safetensors", "tokenizer.model.v3"], local_dir=mistral_models_path)
```

### Chat

After installing `mistral_inference`, a `mistral-chat` CLI command should be available in your environment.

```
mistral-chat $HOME/mistral_models/Codestral-22B-v0.1 --instruct --max_tokens 256
```

This will generate an answer to "Write me a function that computes fibonacci in Rust" and should give something along the following lines:

```
Sure, here's a simple implementation of a function that computes the Fibonacci sequence in Rust. This function takes an integer `n` as an argument and returns the `n`th Fibonacci number.

fn fibonacci(n: u32) -> u32 {
    match n {
        0 => 0,
        1 => 1,
        _ => fibonacci(n - 1) + fibonacci(n - 2),
    }
}

fn main() {
    let n = 10;
    println!("The {}th Fibonacci number is: {}", n, fibonacci(n));
}

This function uses recursion to calculate the Fibonacci number. However, it's not the most efficient solution because it performs a lot of redundant calculations. A more efficient solution would use a loop to iteratively calculate the Fibonacci numbers.
```

### Fill-in-the-middle (FIM)

After installing `mistral_inference`, run `pip install --upgrade mistral_common` to make sure you have `mistral_common >= 1.2` installed, then:

```py
from mistral_inference.transformer import Transformer
from mistral_inference.generate import generate
from mistral_common.tokens.tokenizers.mistral import MistralTokenizer
from mistral_common.tokens.instruct.request import FIMRequest

tokenizer = MistralTokenizer.v3()
model = Transformer.from_folder("~/codestral-22B-240529")

prefix = """def add("""
suffix = """ return sum"""

request = FIMRequest(prompt=prefix, suffix=suffix)

tokens = tokenizer.encode_fim(request).tokens

out_tokens, _ = generate([tokens], model, max_tokens=256, temperature=0.0, eos_id=tokenizer.instruct_tokenizer.tokenizer.eos_id)
result = tokenizer.decode(out_tokens[0])

# The decoded text may still end with the suffix; keep only the generated middle.
middle = result.split(suffix)[0].strip()
print(middle)
```

This should give something along the following lines:

```
num1, num2):

    # Add two numbers
    sum = num1 + num2

    # return the sum
```

## Usage with transformers library

This model is also compatible with the `transformers` library. First run `pip install -U transformers`, then use the snippet below to quickly get started:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "mistralai/Codestral-22B-v0.1"
tokenizer = AutoTokenizer.from_pretrained(model_id)

model = AutoModelForCausalLM.from_pretrained(model_id)

text = "Hello my name is"
inputs = tokenizer(text, return_tensors="pt")

outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```

By default, `transformers` loads the model in full precision. You may therefore want to reduce the memory required to run the model through the optimizations offered in the Hugging Face ecosystem.
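
For example, quantized loading typically cuts the weight memory to roughly a quarter of full precision. A minimal sketch (not part of the original card), assuming `bitsandbytes` and `accelerate` are installed:

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# 4-bit NF4 quantization with bf16 compute; the ~22B weights then take
# roughly 11 GB instead of ~44 GB in bf16 (plus activation/cache overhead).
quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

model = AutoModelForCausalLM.from_pretrained(
    "mistralai/Codestral-22B-v0.1",
    quantization_config=quant_config,
    device_map="auto",  # spreads layers across available GPUs/CPU automatically
)
```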

## Limitations

Codestral-22B-v0.1 does not have any moderation mechanisms. We're looking forward to engaging with the community on ways to make the model finely respect guardrails, allowing for deployment in environments requiring moderated outputs.

## License

Codestral-22B-v0.1 is released under the `MNPL-0.1` license.

## The Mistral AI Team

Albert Jiang, Alexandre Sablayrolles, Alexis Tacnet, Antoine Roux, Arthur Mensch, Audrey Herblin-Stoop, Baptiste Bout, Baudouin de Monicault, Blanche Savary, Bam4d, Caroline Feldman, Devendra Singh Chaplot, Diego de las Casas, Eleonore Arcelin, Emma Bou Hanna, Etienne Metzger, Gianna Lengyel, Guillaume Bour, Guillaume Lample, Harizo Rajaona, Henri Roussez, Jean-Malo Delignon, Jia Li, Justus Murke, Kartik Khandelwal, Lawrence Stewart, Louis Martin, Louis Ternon, Lucile Saulnier, Lélio Renard Lavaud, Margaret Jennings, Marie Pellat, Marie Torelli, Marie-Anne Lachaux, Marjorie Janiewicz, Mickael Seznec, Nicolas Schuhl, Patrick von Platen, Romain Sauvestre, Pierre Stock, Sandeep Subramanian, Saurabh Garg, Sophia Yang, Szymon Antoniak, Teven Le Scao, Thibaut Lavril, Thibault Schueller, Timothée Lacroix, Théophile Gervet, Thomas Wang, Valera Nemychnikova, Wendy Shang, William El Sayed, William Marshall
26
config.json
Normal file
@@ -0,0 +1,26 @@
{
  "_name_or_path": "mistral-community/Codestral-22B-v0.1",
  "architectures": [
    "MistralForCausalLM"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 1,
  "eos_token_id": 2,
  "hidden_act": "silu",
  "hidden_size": 6144,
  "initializer_range": 0.02,
  "intermediate_size": 16384,
  "max_position_embeddings": 32768,
  "model_type": "mistral",
  "num_attention_heads": 48,
  "num_hidden_layers": 56,
  "num_key_value_heads": 8,
  "rms_norm_eps": 1e-05,
  "rope_theta": 1000000.0,
  "sliding_window": null,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.42.0.dev0",
  "use_cache": true,
  "vocab_size": 32768
}
1
configuration.json
Normal file
@@ -0,0 +1 @@
{"framework": "pytorch", "task": "text-generation", "allow_remote": true}
3
consolidated.safetensors
Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6675b83f2de8ab76c9c19e0b28508a2565598c141899b95671f039f89a945cf4
size 44494620752
6
generation_config.json
Normal file
@@ -0,0 +1,6 @@
{
  "_from_model_config": true,
  "bos_token_id": 1,
  "eos_token_id": 2,
  "transformers_version": "4.42.0.dev0"
}
3
model-00001-of-00009.safetensors
Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6c911e66544527032c9e49f602ed0645f748045248eb8fb8ec9982866b899674
size 4882298776
3
model-00002-of-00009.safetensors
Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cc1de07197a04eaeeaa6dcb7ed6604f729ed822e92273c25c112f85c366b5696
size 4983012160
3
model-00003-of-00009.safetensors
Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:90dc483e3b22d3d21a03edd588a8ffe5743b8dea33fc9f1ffc01eb1e529aedf8
size 4957821336
3
model-00004-of-00009.safetensors
Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:76ee31da7cdd8fde0a257030ffdf7d3fb293935a62b8469f6dec1c1a19e14eee
size 4882323744
3
model-00005-of-00009.safetensors
Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1d56824727ffaf568f7a1c7770fd5cb531df71ebe143567b1cb3968aca7f98cd
size 4983012192
3
model-00006-of-00009.safetensors
Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9f30bb3fdbcad8d1c00e0b421908bebc6cb5544669cd3c916ae592acb7263ae4
size 4957821336
3
model-00007-of-00009.safetensors
Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:54eb704485dce4f8c7c245169d25f394ea08dec1562a1ab981715f294ef93314
size 4882323744
3
model-00008-of-00009.safetensors
Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cfbe26e02d475904ecc92cbe54a614607156aabed3503867d8af5023673d6374
size 4983012192
3
model-00009-of-00009.safetensors
Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c834720ddae75dc683e52284ffe27ea35f48eb2c5500c71025925fe0dd398a8c
size 4982999056
514
model.safetensors.index.json
Normal file
@@ -0,0 +1,514 @@
{
  "metadata": {
    "total_size": 44494565376
  },
  "weight_map": {
    "lm_head.weight": "model-00009-of-00009.safetensors",
    "model.embed_tokens.weight": "model-00001-of-00009.safetensors",
    "model.layers.0.input_layernorm.weight": "model-00001-of-00009.safetensors",
    "model.layers.0.mlp.down_proj.weight": "model-00001-of-00009.safetensors",
    "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00009.safetensors",
    "model.layers.0.mlp.up_proj.weight": "model-00001-of-00009.safetensors",
    "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00009.safetensors",
    "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00009.safetensors",
    "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00009.safetensors",
    "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00009.safetensors",
    "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00009.safetensors",
    "model.layers.1.input_layernorm.weight": "model-00001-of-00009.safetensors",
    "model.layers.1.mlp.down_proj.weight": "model-00001-of-00009.safetensors",
    "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00009.safetensors",
    "model.layers.1.mlp.up_proj.weight": "model-00001-of-00009.safetensors",
    "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00009.safetensors",
    "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00009.safetensors",
    "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00009.safetensors",
    "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00009.safetensors",
    "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00009.safetensors",
    "model.layers.10.input_layernorm.weight": "model-00002-of-00009.safetensors",
    "model.layers.10.mlp.down_proj.weight": "model-00002-of-00009.safetensors",
    "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00009.safetensors",
    "model.layers.10.mlp.up_proj.weight": "model-00002-of-00009.safetensors",
    "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00009.safetensors",
    "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00009.safetensors",
    "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00009.safetensors",
    "model.layers.10.self_attn.q_proj.weight": "model-00002-of-00009.safetensors",
    "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00009.safetensors",
    "model.layers.11.input_layernorm.weight": "model-00002-of-00009.safetensors",
    "model.layers.11.mlp.down_proj.weight": "model-00002-of-00009.safetensors",
    "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00009.safetensors",
    "model.layers.11.mlp.up_proj.weight": "model-00002-of-00009.safetensors",
    "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00009.safetensors",
    "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00009.safetensors",
    "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00009.safetensors",
    "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00009.safetensors",
    "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00009.safetensors",
    "model.layers.12.input_layernorm.weight": "model-00003-of-00009.safetensors",
    "model.layers.12.mlp.down_proj.weight": "model-00003-of-00009.safetensors",
    "model.layers.12.mlp.gate_proj.weight": "model-00003-of-00009.safetensors",
    "model.layers.12.mlp.up_proj.weight": "model-00003-of-00009.safetensors",
    "model.layers.12.post_attention_layernorm.weight": "model-00003-of-00009.safetensors",
    "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00009.safetensors",
    "model.layers.12.self_attn.o_proj.weight": "model-00003-of-00009.safetensors",
    "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00009.safetensors",
    "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00009.safetensors",
    "model.layers.13.input_layernorm.weight": "model-00003-of-00009.safetensors",
    "model.layers.13.mlp.down_proj.weight": "model-00003-of-00009.safetensors",
    "model.layers.13.mlp.gate_proj.weight": "model-00003-of-00009.safetensors",
    "model.layers.13.mlp.up_proj.weight": "model-00003-of-00009.safetensors",
    "model.layers.13.post_attention_layernorm.weight": "model-00003-of-00009.safetensors",
    "model.layers.13.self_attn.k_proj.weight": "model-00003-of-00009.safetensors",
    "model.layers.13.self_attn.o_proj.weight": "model-00003-of-00009.safetensors",
    "model.layers.13.self_attn.q_proj.weight": "model-00003-of-00009.safetensors",
    "model.layers.13.self_attn.v_proj.weight": "model-00003-of-00009.safetensors",
    "model.layers.14.input_layernorm.weight": "model-00003-of-00009.safetensors",
    "model.layers.14.mlp.down_proj.weight": "model-00003-of-00009.safetensors",
    "model.layers.14.mlp.gate_proj.weight": "model-00003-of-00009.safetensors",
    "model.layers.14.mlp.up_proj.weight": "model-00003-of-00009.safetensors",
    "model.layers.14.post_attention_layernorm.weight": "model-00003-of-00009.safetensors",
    "model.layers.14.self_attn.k_proj.weight": "model-00003-of-00009.safetensors",
    "model.layers.14.self_attn.o_proj.weight": "model-00003-of-00009.safetensors",
    "model.layers.14.self_attn.q_proj.weight": "model-00003-of-00009.safetensors",
    "model.layers.14.self_attn.v_proj.weight": "model-00003-of-00009.safetensors",
    "model.layers.15.input_layernorm.weight": "model-00003-of-00009.safetensors",
    "model.layers.15.mlp.down_proj.weight": "model-00003-of-00009.safetensors",
    "model.layers.15.mlp.gate_proj.weight": "model-00003-of-00009.safetensors",
    "model.layers.15.mlp.up_proj.weight": "model-00003-of-00009.safetensors",
    "model.layers.15.post_attention_layernorm.weight": "model-00003-of-00009.safetensors",
    "model.layers.15.self_attn.k_proj.weight": "model-00003-of-00009.safetensors",
    "model.layers.15.self_attn.o_proj.weight": "model-00003-of-00009.safetensors",
    "model.layers.15.self_attn.q_proj.weight": "model-00003-of-00009.safetensors",
    "model.layers.15.self_attn.v_proj.weight": "model-00003-of-00009.safetensors",
    "model.layers.16.input_layernorm.weight": "model-00003-of-00009.safetensors",
    "model.layers.16.mlp.down_proj.weight": "model-00003-of-00009.safetensors",
    "model.layers.16.mlp.gate_proj.weight": "model-00003-of-00009.safetensors",
    "model.layers.16.mlp.up_proj.weight": "model-00003-of-00009.safetensors",
    "model.layers.16.post_attention_layernorm.weight": "model-00003-of-00009.safetensors",
    "model.layers.16.self_attn.k_proj.weight": "model-00003-of-00009.safetensors",
    "model.layers.16.self_attn.o_proj.weight": "model-00003-of-00009.safetensors",
    "model.layers.16.self_attn.q_proj.weight": "model-00003-of-00009.safetensors",
    "model.layers.16.self_attn.v_proj.weight": "model-00003-of-00009.safetensors",
    "model.layers.17.input_layernorm.weight": "model-00003-of-00009.safetensors",
    "model.layers.17.mlp.down_proj.weight": "model-00003-of-00009.safetensors",
    "model.layers.17.mlp.gate_proj.weight": "model-00003-of-00009.safetensors",
    "model.layers.17.mlp.up_proj.weight": "model-00003-of-00009.safetensors",
    "model.layers.17.post_attention_layernorm.weight": "model-00003-of-00009.safetensors",
    "model.layers.17.self_attn.k_proj.weight": "model-00003-of-00009.safetensors",
    "model.layers.17.self_attn.o_proj.weight": "model-00003-of-00009.safetensors",
    "model.layers.17.self_attn.q_proj.weight": "model-00003-of-00009.safetensors",
    "model.layers.17.self_attn.v_proj.weight": "model-00003-of-00009.safetensors",
    "model.layers.18.input_layernorm.weight": "model-00004-of-00009.safetensors",
    "model.layers.18.mlp.down_proj.weight": "model-00004-of-00009.safetensors",
    "model.layers.18.mlp.gate_proj.weight": "model-00003-of-00009.safetensors",
    "model.layers.18.mlp.up_proj.weight": "model-00004-of-00009.safetensors",
    "model.layers.18.post_attention_layernorm.weight": "model-00004-of-00009.safetensors",
    "model.layers.18.self_attn.k_proj.weight": "model-00003-of-00009.safetensors",
    "model.layers.18.self_attn.o_proj.weight": "model-00003-of-00009.safetensors",
    "model.layers.18.self_attn.q_proj.weight": "model-00003-of-00009.safetensors",
    "model.layers.18.self_attn.v_proj.weight": "model-00003-of-00009.safetensors",
    "model.layers.19.input_layernorm.weight": "model-00004-of-00009.safetensors",
    "model.layers.19.mlp.down_proj.weight": "model-00004-of-00009.safetensors",
    "model.layers.19.mlp.gate_proj.weight": "model-00004-of-00009.safetensors",
    "model.layers.19.mlp.up_proj.weight": "model-00004-of-00009.safetensors",
    "model.layers.19.post_attention_layernorm.weight": "model-00004-of-00009.safetensors",
    "model.layers.19.self_attn.k_proj.weight": "model-00004-of-00009.safetensors",
    "model.layers.19.self_attn.o_proj.weight": "model-00004-of-00009.safetensors",
    "model.layers.19.self_attn.q_proj.weight": "model-00004-of-00009.safetensors",
    "model.layers.19.self_attn.v_proj.weight": "model-00004-of-00009.safetensors",
    "model.layers.2.input_layernorm.weight": "model-00001-of-00009.safetensors",
    "model.layers.2.mlp.down_proj.weight": "model-00001-of-00009.safetensors",
    "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00009.safetensors",
    "model.layers.2.mlp.up_proj.weight": "model-00001-of-00009.safetensors",
    "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00009.safetensors",
    "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00009.safetensors",
    "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00009.safetensors",
    "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00009.safetensors",
    "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00009.safetensors",
    "model.layers.20.input_layernorm.weight": "model-00004-of-00009.safetensors",
    "model.layers.20.mlp.down_proj.weight": "model-00004-of-00009.safetensors",
    "model.layers.20.mlp.gate_proj.weight": "model-00004-of-00009.safetensors",
    "model.layers.20.mlp.up_proj.weight": "model-00004-of-00009.safetensors",
    "model.layers.20.post_attention_layernorm.weight": "model-00004-of-00009.safetensors",
    "model.layers.20.self_attn.k_proj.weight": "model-00004-of-00009.safetensors",
    "model.layers.20.self_attn.o_proj.weight": "model-00004-of-00009.safetensors",
    "model.layers.20.self_attn.q_proj.weight": "model-00004-of-00009.safetensors",
    "model.layers.20.self_attn.v_proj.weight": "model-00004-of-00009.safetensors",
    "model.layers.21.input_layernorm.weight": "model-00004-of-00009.safetensors",
    "model.layers.21.mlp.down_proj.weight": "model-00004-of-00009.safetensors",
    "model.layers.21.mlp.gate_proj.weight": "model-00004-of-00009.safetensors",
    "model.layers.21.mlp.up_proj.weight": "model-00004-of-00009.safetensors",
    "model.layers.21.post_attention_layernorm.weight": "model-00004-of-00009.safetensors",
    "model.layers.21.self_attn.k_proj.weight": "model-00004-of-00009.safetensors",
    "model.layers.21.self_attn.o_proj.weight": "model-00004-of-00009.safetensors",
    "model.layers.21.self_attn.q_proj.weight": "model-00004-of-00009.safetensors",
    "model.layers.21.self_attn.v_proj.weight": "model-00004-of-00009.safetensors",
    "model.layers.22.input_layernorm.weight": "model-00004-of-00009.safetensors",
    "model.layers.22.mlp.down_proj.weight": "model-00004-of-00009.safetensors",
    "model.layers.22.mlp.gate_proj.weight": "model-00004-of-00009.safetensors",
    "model.layers.22.mlp.up_proj.weight": "model-00004-of-00009.safetensors",
    "model.layers.22.post_attention_layernorm.weight": "model-00004-of-00009.safetensors",
    "model.layers.22.self_attn.k_proj.weight": "model-00004-of-00009.safetensors",
    "model.layers.22.self_attn.o_proj.weight": "model-00004-of-00009.safetensors",
    "model.layers.22.self_attn.q_proj.weight": "model-00004-of-00009.safetensors",
    "model.layers.22.self_attn.v_proj.weight": "model-00004-of-00009.safetensors",
    "model.layers.23.input_layernorm.weight": "model-00004-of-00009.safetensors",
    "model.layers.23.mlp.down_proj.weight": "model-00004-of-00009.safetensors",
    "model.layers.23.mlp.gate_proj.weight": "model-00004-of-00009.safetensors",
    "model.layers.23.mlp.up_proj.weight": "model-00004-of-00009.safetensors",
    "model.layers.23.post_attention_layernorm.weight": "model-00004-of-00009.safetensors",
    "model.layers.23.self_attn.k_proj.weight": "model-00004-of-00009.safetensors",
    "model.layers.23.self_attn.o_proj.weight": "model-00004-of-00009.safetensors",
    "model.layers.23.self_attn.q_proj.weight": "model-00004-of-00009.safetensors",
    "model.layers.23.self_attn.v_proj.weight": "model-00004-of-00009.safetensors",
    "model.layers.24.input_layernorm.weight": "model-00005-of-00009.safetensors",
    "model.layers.24.mlp.down_proj.weight": "model-00005-of-00009.safetensors",
    "model.layers.24.mlp.gate_proj.weight": "model-00004-of-00009.safetensors",
    "model.layers.24.mlp.up_proj.weight": "model-00004-of-00009.safetensors",
    "model.layers.24.post_attention_layernorm.weight": "model-00005-of-00009.safetensors",
    "model.layers.24.self_attn.k_proj.weight": "model-00004-of-00009.safetensors",
    "model.layers.24.self_attn.o_proj.weight": "model-00004-of-00009.safetensors",
    "model.layers.24.self_attn.q_proj.weight": "model-00004-of-00009.safetensors",
    "model.layers.24.self_attn.v_proj.weight": "model-00004-of-00009.safetensors",
    "model.layers.25.input_layernorm.weight": "model-00005-of-00009.safetensors",
    "model.layers.25.mlp.down_proj.weight": "model-00005-of-00009.safetensors",
    "model.layers.25.mlp.gate_proj.weight": "model-00005-of-00009.safetensors",
    "model.layers.25.mlp.up_proj.weight": "model-00005-of-00009.safetensors",
    "model.layers.25.post_attention_layernorm.weight": "model-00005-of-00009.safetensors",
    "model.layers.25.self_attn.k_proj.weight": "model-00005-of-00009.safetensors",
    "model.layers.25.self_attn.o_proj.weight": "model-00005-of-00009.safetensors",
    "model.layers.25.self_attn.q_proj.weight": "model-00005-of-00009.safetensors",
    "model.layers.25.self_attn.v_proj.weight": "model-00005-of-00009.safetensors",
    "model.layers.26.input_layernorm.weight": "model-00005-of-00009.safetensors",
    "model.layers.26.mlp.down_proj.weight": "model-00005-of-00009.safetensors",
    "model.layers.26.mlp.gate_proj.weight": "model-00005-of-00009.safetensors",
    "model.layers.26.mlp.up_proj.weight": "model-00005-of-00009.safetensors",
    "model.layers.26.post_attention_layernorm.weight": "model-00005-of-00009.safetensors",
    "model.layers.26.self_attn.k_proj.weight": "model-00005-of-00009.safetensors",
    "model.layers.26.self_attn.o_proj.weight": "model-00005-of-00009.safetensors",
    "model.layers.26.self_attn.q_proj.weight": "model-00005-of-00009.safetensors",
    "model.layers.26.self_attn.v_proj.weight": "model-00005-of-00009.safetensors",
    "model.layers.27.input_layernorm.weight": "model-00005-of-00009.safetensors",
    "model.layers.27.mlp.down_proj.weight": "model-00005-of-00009.safetensors",
    "model.layers.27.mlp.gate_proj.weight": "model-00005-of-00009.safetensors",
    "model.layers.27.mlp.up_proj.weight": "model-00005-of-00009.safetensors",
    "model.layers.27.post_attention_layernorm.weight": "model-00005-of-00009.safetensors",
    "model.layers.27.self_attn.k_proj.weight": "model-00005-of-00009.safetensors",
    "model.layers.27.self_attn.o_proj.weight": "model-00005-of-00009.safetensors",
    "model.layers.27.self_attn.q_proj.weight": "model-00005-of-00009.safetensors",
    "model.layers.27.self_attn.v_proj.weight": "model-00005-of-00009.safetensors",
    "model.layers.28.input_layernorm.weight": "model-00005-of-00009.safetensors",
    "model.layers.28.mlp.down_proj.weight": "model-00005-of-00009.safetensors",
    "model.layers.28.mlp.gate_proj.weight": "model-00005-of-00009.safetensors",
    "model.layers.28.mlp.up_proj.weight": "model-00005-of-00009.safetensors",
    "model.layers.28.post_attention_layernorm.weight": "model-00005-of-00009.safetensors",
    "model.layers.28.self_attn.k_proj.weight": "model-00005-of-00009.safetensors",
    "model.layers.28.self_attn.o_proj.weight": "model-00005-of-00009.safetensors",
    "model.layers.28.self_attn.q_proj.weight": "model-00005-of-00009.safetensors",
    "model.layers.28.self_attn.v_proj.weight": "model-00005-of-00009.safetensors",
    "model.layers.29.input_layernorm.weight": "model-00005-of-00009.safetensors",
    "model.layers.29.mlp.down_proj.weight": "model-00005-of-00009.safetensors",
    "model.layers.29.mlp.gate_proj.weight": "model-00005-of-00009.safetensors",
    "model.layers.29.mlp.up_proj.weight": "model-00005-of-00009.safetensors",
    "model.layers.29.post_attention_layernorm.weight": "model-00005-of-00009.safetensors",
    "model.layers.29.self_attn.k_proj.weight": "model-00005-of-00009.safetensors",
    "model.layers.29.self_attn.o_proj.weight": "model-00005-of-00009.safetensors",
    "model.layers.29.self_attn.q_proj.weight": "model-00005-of-00009.safetensors",
    "model.layers.29.self_attn.v_proj.weight": "model-00005-of-00009.safetensors",
    "model.layers.3.input_layernorm.weight": "model-00001-of-00009.safetensors",
    "model.layers.3.mlp.down_proj.weight": "model-00001-of-00009.safetensors",
    "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00009.safetensors",
    "model.layers.3.mlp.up_proj.weight": "model-00001-of-00009.safetensors",
    "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00009.safetensors",
    "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00009.safetensors",
    "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00009.safetensors",
    "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00009.safetensors",
    "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00009.safetensors",
    "model.layers.30.input_layernorm.weight": "model-00005-of-00009.safetensors",
    "model.layers.30.mlp.down_proj.weight": "model-00005-of-00009.safetensors",
    "model.layers.30.mlp.gate_proj.weight": "model-00005-of-00009.safetensors",
    "model.layers.30.mlp.up_proj.weight": "model-00005-of-00009.safetensors",
    "model.layers.30.post_attention_layernorm.weight": "model-00005-of-00009.safetensors",
    "model.layers.30.self_attn.k_proj.weight": "model-00005-of-00009.safetensors",
    "model.layers.30.self_attn.o_proj.weight": "model-00005-of-00009.safetensors",
    "model.layers.30.self_attn.q_proj.weight": "model-00005-of-00009.safetensors",
    "model.layers.30.self_attn.v_proj.weight": "model-00005-of-00009.safetensors",
    "model.layers.31.input_layernorm.weight": "model-00006-of-00009.safetensors",
    "model.layers.31.mlp.down_proj.weight": "model-00006-of-00009.safetensors",
    "model.layers.31.mlp.gate_proj.weight": "model-00006-of-00009.safetensors",
    "model.layers.31.mlp.up_proj.weight": "model-00006-of-00009.safetensors",
    "model.layers.31.post_attention_layernorm.weight": "model-00006-of-00009.safetensors",
    "model.layers.31.self_attn.k_proj.weight": "model-00005-of-00009.safetensors",
    "model.layers.31.self_attn.o_proj.weight": "model-00006-of-00009.safetensors",
    "model.layers.31.self_attn.q_proj.weight": "model-00005-of-00009.safetensors",
    "model.layers.31.self_attn.v_proj.weight": "model-00005-of-00009.safetensors",
    "model.layers.32.input_layernorm.weight": "model-00006-of-00009.safetensors",
    "model.layers.32.mlp.down_proj.weight": "model-00006-of-00009.safetensors",
    "model.layers.32.mlp.gate_proj.weight": "model-00006-of-00009.safetensors",
    "model.layers.32.mlp.up_proj.weight": "model-00006-of-00009.safetensors",
    "model.layers.32.post_attention_layernorm.weight": "model-00006-of-00009.safetensors",
    "model.layers.32.self_attn.k_proj.weight": "model-00006-of-00009.safetensors",
    "model.layers.32.self_attn.o_proj.weight": "model-00006-of-00009.safetensors",
    "model.layers.32.self_attn.q_proj.weight": "model-00006-of-00009.safetensors",
    "model.layers.32.self_attn.v_proj.weight": "model-00006-of-00009.safetensors",
    "model.layers.33.input_layernorm.weight": "model-00006-of-00009.safetensors",
    "model.layers.33.mlp.down_proj.weight": "model-00006-of-00009.safetensors",
    "model.layers.33.mlp.gate_proj.weight": "model-00006-of-00009.safetensors",
    "model.layers.33.mlp.up_proj.weight": "model-00006-of-00009.safetensors",
    "model.layers.33.post_attention_layernorm.weight": "model-00006-of-00009.safetensors",
    "model.layers.33.self_attn.k_proj.weight": "model-00006-of-00009.safetensors",
    "model.layers.33.self_attn.o_proj.weight": "model-00006-of-00009.safetensors",
    "model.layers.33.self_attn.q_proj.weight": "model-00006-of-00009.safetensors",
    "model.layers.33.self_attn.v_proj.weight": "model-00006-of-00009.safetensors",
    "model.layers.34.input_layernorm.weight": "model-00006-of-00009.safetensors",
    "model.layers.34.mlp.down_proj.weight": "model-00006-of-00009.safetensors",
    "model.layers.34.mlp.gate_proj.weight": "model-00006-of-00009.safetensors",
    "model.layers.34.mlp.up_proj.weight": "model-00006-of-00009.safetensors",
    "model.layers.34.post_attention_layernorm.weight": "model-00006-of-00009.safetensors",
    "model.layers.34.self_attn.k_proj.weight": "model-00006-of-00009.safetensors",
    "model.layers.34.self_attn.o_proj.weight": "model-00006-of-00009.safetensors",
    "model.layers.34.self_attn.q_proj.weight": "model-00006-of-00009.safetensors",
    "model.layers.34.self_attn.v_proj.weight": "model-00006-of-00009.safetensors",
    "model.layers.35.input_layernorm.weight": "model-00006-of-00009.safetensors",
    "model.layers.35.mlp.down_proj.weight": "model-00006-of-00009.safetensors",
    "model.layers.35.mlp.gate_proj.weight": "model-00006-of-00009.safetensors",
    "model.layers.35.mlp.up_proj.weight": "model-00006-of-00009.safetensors",
    "model.layers.35.post_attention_layernorm.weight": "model-00006-of-00009.safetensors",
    "model.layers.35.self_attn.k_proj.weight": "model-00006-of-00009.safetensors",
    "model.layers.35.self_attn.o_proj.weight": "model-00006-of-00009.safetensors",
    "model.layers.35.self_attn.q_proj.weight": "model-00006-of-00009.safetensors",
    "model.layers.35.self_attn.v_proj.weight": "model-00006-of-00009.safetensors",
    "model.layers.36.input_layernorm.weight": "model-00006-of-00009.safetensors",
    "model.layers.36.mlp.down_proj.weight": "model-00006-of-00009.safetensors",
    "model.layers.36.mlp.gate_proj.weight": "model-00006-of-00009.safetensors",
    "model.layers.36.mlp.up_proj.weight": "model-00006-of-00009.safetensors",
    "model.layers.36.post_attention_layernorm.weight": "model-00006-of-00009.safetensors",
    "model.layers.36.self_attn.k_proj.weight": "model-00006-of-00009.safetensors",
    "model.layers.36.self_attn.o_proj.weight": "model-00006-of-00009.safetensors",
    "model.layers.36.self_attn.q_proj.weight": "model-00006-of-00009.safetensors",
    "model.layers.36.self_attn.v_proj.weight": "model-00006-of-00009.safetensors",
    "model.layers.37.input_layernorm.weight": "model-00007-of-00009.safetensors",
    "model.layers.37.mlp.down_proj.weight": "model-00007-of-00009.safetensors",
    "model.layers.37.mlp.gate_proj.weight": "model-00006-of-00009.safetensors",
    "model.layers.37.mlp.up_proj.weight": "model-00007-of-00009.safetensors",
    "model.layers.37.post_attention_layernorm.weight": "model-00007-of-00009.safetensors",
    "model.layers.37.self_attn.k_proj.weight": "model-00006-of-00009.safetensors",
    "model.layers.37.self_attn.o_proj.weight": "model-00006-of-00009.safetensors",
    "model.layers.37.self_attn.q_proj.weight": "model-00006-of-00009.safetensors",
    "model.layers.37.self_attn.v_proj.weight": "model-00006-of-00009.safetensors",
    "model.layers.38.input_layernorm.weight": "model-00007-of-00009.safetensors",
    "model.layers.38.mlp.down_proj.weight": "model-00007-of-00009.safetensors",
    "model.layers.38.mlp.gate_proj.weight": "model-00007-of-00009.safetensors",
    "model.layers.38.mlp.up_proj.weight": "model-00007-of-00009.safetensors",
    "model.layers.38.post_attention_layernorm.weight": "model-00007-of-00009.safetensors",
    "model.layers.38.self_attn.k_proj.weight": "model-00007-of-00009.safetensors",
    "model.layers.38.self_attn.o_proj.weight": "model-00007-of-00009.safetensors",
    "model.layers.38.self_attn.q_proj.weight": "model-00007-of-00009.safetensors",
    "model.layers.38.self_attn.v_proj.weight": "model-00007-of-00009.safetensors",
    "model.layers.39.input_layernorm.weight": "model-00007-of-00009.safetensors",
    "model.layers.39.mlp.down_proj.weight": "model-00007-of-00009.safetensors",
    "model.layers.39.mlp.gate_proj.weight": "model-00007-of-00009.safetensors",
    "model.layers.39.mlp.up_proj.weight": "model-00007-of-00009.safetensors",
    "model.layers.39.post_attention_layernorm.weight": "model-00007-of-00009.safetensors",
    "model.layers.39.self_attn.k_proj.weight": "model-00007-of-00009.safetensors",
    "model.layers.39.self_attn.o_proj.weight": "model-00007-of-00009.safetensors",
    "model.layers.39.self_attn.q_proj.weight": "model-00007-of-00009.safetensors",
    "model.layers.39.self_attn.v_proj.weight": "model-00007-of-00009.safetensors",
    "model.layers.4.input_layernorm.weight": "model-00001-of-00009.safetensors",
    "model.layers.4.mlp.down_proj.weight": "model-00001-of-00009.safetensors",
    "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00009.safetensors",
    "model.layers.4.mlp.up_proj.weight": "model-00001-of-00009.safetensors",
    "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00009.safetensors",
    "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00009.safetensors",
    "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00009.safetensors",
    "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00009.safetensors",
    "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00009.safetensors",
    "model.layers.40.input_layernorm.weight": "model-00007-of-00009.safetensors",
    "model.layers.40.mlp.down_proj.weight": "model-00007-of-00009.safetensors",
    "model.layers.40.mlp.gate_proj.weight": "model-00007-of-00009.safetensors",
    "model.layers.40.mlp.up_proj.weight": "model-00007-of-00009.safetensors",
    "model.layers.40.post_attention_layernorm.weight": "model-00007-of-00009.safetensors",
    "model.layers.40.self_attn.k_proj.weight": "model-00007-of-00009.safetensors",
    "model.layers.40.self_attn.o_proj.weight": "model-00007-of-00009.safetensors",
    "model.layers.40.self_attn.q_proj.weight": "model-00007-of-00009.safetensors",
    "model.layers.40.self_attn.v_proj.weight": "model-00007-of-00009.safetensors",
    "model.layers.41.input_layernorm.weight": "model-00007-of-00009.safetensors",
    "model.layers.41.mlp.down_proj.weight": "model-00007-of-00009.safetensors",
    "model.layers.41.mlp.gate_proj.weight": "model-00007-of-00009.safetensors",
    "model.layers.41.mlp.up_proj.weight": "model-00007-of-00009.safetensors",
    "model.layers.41.post_attention_layernorm.weight": "model-00007-of-00009.safetensors",
    "model.layers.41.self_attn.k_proj.weight": "model-00007-of-00009.safetensors",
    "model.layers.41.self_attn.o_proj.weight": "model-00007-of-00009.safetensors",
    "model.layers.41.self_attn.q_proj.weight": "model-00007-of-00009.safetensors",
    "model.layers.41.self_attn.v_proj.weight": "model-00007-of-00009.safetensors",
    "model.layers.42.input_layernorm.weight": "model-00007-of-00009.safetensors",
    "model.layers.42.mlp.down_proj.weight": "model-00007-of-00009.safetensors",
    "model.layers.42.mlp.gate_proj.weight": "model-00007-of-00009.safetensors",
    "model.layers.42.mlp.up_proj.weight": "model-00007-of-00009.safetensors",
    "model.layers.42.post_attention_layernorm.weight": "model-00007-of-00009.safetensors",
    "model.layers.42.self_attn.k_proj.weight": "model-00007-of-00009.safetensors",
    "model.layers.42.self_attn.o_proj.weight": "model-00007-of-00009.safetensors",
    "model.layers.42.self_attn.q_proj.weight": "model-00007-of-00009.safetensors",
    "model.layers.42.self_attn.v_proj.weight": "model-00007-of-00009.safetensors",
    "model.layers.43.input_layernorm.weight": "model-00008-of-00009.safetensors",
    "model.layers.43.mlp.down_proj.weight": "model-00008-of-00009.safetensors",
    "model.layers.43.mlp.gate_proj.weight": "model-00007-of-00009.safetensors",
    "model.layers.43.mlp.up_proj.weight": "model-00007-of-00009.safetensors",
    "model.layers.43.post_attention_layernorm.weight": "model-00008-of-00009.safetensors",
    "model.layers.43.self_attn.k_proj.weight": "model-00007-of-00009.safetensors",
    "model.layers.43.self_attn.o_proj.weight": "model-00007-of-00009.safetensors",
    "model.layers.43.self_attn.q_proj.weight": "model-00007-of-00009.safetensors",
    "model.layers.43.self_attn.v_proj.weight": "model-00007-of-00009.safetensors",
    "model.layers.44.input_layernorm.weight": "model-00008-of-00009.safetensors",
    "model.layers.44.mlp.down_proj.weight": "model-00008-of-00009.safetensors",
    "model.layers.44.mlp.gate_proj.weight": "model-00008-of-00009.safetensors",
    "model.layers.44.mlp.up_proj.weight": "model-00008-of-00009.safetensors",
    "model.layers.44.post_attention_layernorm.weight": "model-00008-of-00009.safetensors",
    "model.layers.44.self_attn.k_proj.weight": "model-00008-of-00009.safetensors",
    "model.layers.44.self_attn.o_proj.weight": "model-00008-of-00009.safetensors",
    "model.layers.44.self_attn.q_proj.weight": "model-00008-of-00009.safetensors",
    "model.layers.44.self_attn.v_proj.weight": "model-00008-of-00009.safetensors",
    "model.layers.45.input_layernorm.weight": "model-00008-of-00009.safetensors",
    "model.layers.45.mlp.down_proj.weight": "model-00008-of-00009.safetensors",
    "model.layers.45.mlp.gate_proj.weight": "model-00008-of-00009.safetensors",
    "model.layers.45.mlp.up_proj.weight": "model-00008-of-00009.safetensors",
    "model.layers.45.post_attention_layernorm.weight": "model-00008-of-00009.safetensors",
    "model.layers.45.self_attn.k_proj.weight": "model-00008-of-00009.safetensors",
    "model.layers.45.self_attn.o_proj.weight": "model-00008-of-00009.safetensors",
    "model.layers.45.self_attn.q_proj.weight": "model-00008-of-00009.safetensors",
    "model.layers.45.self_attn.v_proj.weight": "model-00008-of-00009.safetensors",
    "model.layers.46.input_layernorm.weight": "model-00008-of-00009.safetensors",
    "model.layers.46.mlp.down_proj.weight": "model-00008-of-00009.safetensors",
    "model.layers.46.mlp.gate_proj.weight": "model-00008-of-00009.safetensors",
    "model.layers.46.mlp.up_proj.weight": "model-00008-of-00009.safetensors",
    "model.layers.46.post_attention_layernorm.weight": "model-00008-of-00009.safetensors",
    "model.layers.46.self_attn.k_proj.weight": "model-00008-of-00009.safetensors",
    "model.layers.46.self_attn.o_proj.weight": "model-00008-of-00009.safetensors",
    "model.layers.46.self_attn.q_proj.weight": "model-00008-of-00009.safetensors",
    "model.layers.46.self_attn.v_proj.weight": "model-00008-of-00009.safetensors",
    "model.layers.47.input_layernorm.weight": "model-00008-of-00009.safetensors",
    "model.layers.47.mlp.down_proj.weight": "model-00008-of-00009.safetensors",
    "model.layers.47.mlp.gate_proj.weight": "model-00008-of-00009.safetensors",
    "model.layers.47.mlp.up_proj.weight": "model-00008-of-00009.safetensors",
    "model.layers.47.post_attention_layernorm.weight": "model-00008-of-00009.safetensors",
    "model.layers.47.self_attn.k_proj.weight": "model-00008-of-00009.safetensors",
    "model.layers.47.self_attn.o_proj.weight": "model-00008-of-00009.safetensors",
    "model.layers.47.self_attn.q_proj.weight": "model-00008-of-00009.safetensors",
    "model.layers.47.self_attn.v_proj.weight": "model-00008-of-00009.safetensors",
    "model.layers.48.input_layernorm.weight": "model-00008-of-00009.safetensors",
    "model.layers.48.mlp.down_proj.weight": "model-00008-of-00009.safetensors",
    "model.layers.48.mlp.gate_proj.weight": "model-00008-of-00009.safetensors",
    "model.layers.48.mlp.up_proj.weight": "model-00008-of-00009.safetensors",
    "model.layers.48.post_attention_layernorm.weight": "model-00008-of-00009.safetensors",
    "model.layers.48.self_attn.k_proj.weight": "model-00008-of-00009.safetensors",
    "model.layers.48.self_attn.o_proj.weight": "model-00008-of-00009.safetensors",
    "model.layers.48.self_attn.q_proj.weight": "model-00008-of-00009.safetensors",
    "model.layers.48.self_attn.v_proj.weight": "model-00008-of-00009.safetensors",
    "model.layers.49.input_layernorm.weight": "model-00008-of-00009.safetensors",
    "model.layers.49.mlp.down_proj.weight": "model-00008-of-00009.safetensors",
    "model.layers.49.mlp.gate_proj.weight": "model-00008-of-00009.safetensors",
    "model.layers.49.mlp.up_proj.weight": "model-00008-of-00009.safetensors",
    "model.layers.49.post_attention_layernorm.weight": "model-00008-of-00009.safetensors",
    "model.layers.49.self_attn.k_proj.weight": "model-00008-of-00009.safetensors",
    "model.layers.49.self_attn.o_proj.weight": "model-00008-of-00009.safetensors",
    "model.layers.49.self_attn.q_proj.weight": "model-00008-of-00009.safetensors",
    "model.layers.49.self_attn.v_proj.weight": "model-00008-of-00009.safetensors",
    "model.layers.5.input_layernorm.weight": "model-00002-of-00009.safetensors",
    "model.layers.5.mlp.down_proj.weight": "model-00002-of-00009.safetensors",
    "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00009.safetensors",
    "model.layers.5.mlp.up_proj.weight": "model-00001-of-00009.safetensors",
    "model.layers.5.post_attention_layernorm.weight": "model-00002-of-00009.safetensors",
    "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00009.safetensors",
    "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00009.safetensors",
    "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00009.safetensors",
    "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00009.safetensors",
    "model.layers.50.input_layernorm.weight": "model-00009-of-00009.safetensors",
    "model.layers.50.mlp.down_proj.weight": "model-00009-of-00009.safetensors",
    "model.layers.50.mlp.gate_proj.weight": "model-00009-of-00009.safetensors",
    "model.layers.50.mlp.up_proj.weight": "model-00009-of-00009.safetensors",
    "model.layers.50.post_attention_layernorm.weight": "model-00009-of-00009.safetensors",
    "model.layers.50.self_attn.k_proj.weight": "model-00008-of-00009.safetensors",
    "model.layers.50.self_attn.o_proj.weight": "model-00009-of-00009.safetensors",
    "model.layers.50.self_attn.q_proj.weight": "model-00008-of-00009.safetensors",
    "model.layers.50.self_attn.v_proj.weight": "model-00008-of-00009.safetensors",
    "model.layers.51.input_layernorm.weight": "model-00009-of-00009.safetensors",
    "model.layers.51.mlp.down_proj.weight": "model-00009-of-00009.safetensors",
    "model.layers.51.mlp.gate_proj.weight": "model-00009-of-00009.safetensors",
    "model.layers.51.mlp.up_proj.weight": "model-00009-of-00009.safetensors",
    "model.layers.51.post_attention_layernorm.weight": "model-00009-of-00009.safetensors",
    "model.layers.51.self_attn.k_proj.weight": "model-00009-of-00009.safetensors",
    "model.layers.51.self_attn.o_proj.weight": "model-00009-of-00009.safetensors",
    "model.layers.51.self_attn.q_proj.weight": "model-00009-of-00009.safetensors",
    "model.layers.51.self_attn.v_proj.weight": "model-00009-of-00009.safetensors",
    "model.layers.52.input_layernorm.weight": "model-00009-of-00009.safetensors",
    "model.layers.52.mlp.down_proj.weight": "model-00009-of-00009.safetensors",
    "model.layers.52.mlp.gate_proj.weight": "model-00009-of-00009.safetensors",
    "model.layers.52.mlp.up_proj.weight": "model-00009-of-00009.safetensors",
    "model.layers.52.post_attention_layernorm.weight": "model-00009-of-00009.safetensors",
    "model.layers.52.self_attn.k_proj.weight": "model-00009-of-00009.safetensors",
    "model.layers.52.self_attn.o_proj.weight": "model-00009-of-00009.safetensors",
    "model.layers.52.self_attn.q_proj.weight": "model-00009-of-00009.safetensors",
    "model.layers.52.self_attn.v_proj.weight": "model-00009-of-00009.safetensors",
    "model.layers.53.input_layernorm.weight": "model-00009-of-00009.safetensors",
    "model.layers.53.mlp.down_proj.weight": "model-00009-of-00009.safetensors",
    "model.layers.53.mlp.gate_proj.weight": "model-00009-of-00009.safetensors",
    "model.layers.53.mlp.up_proj.weight": "model-00009-of-00009.safetensors",
    "model.layers.53.post_attention_layernorm.weight": "model-00009-of-00009.safetensors",
    "model.layers.53.self_attn.k_proj.weight": "model-00009-of-00009.safetensors",
    "model.layers.53.self_attn.o_proj.weight": "model-00009-of-00009.safetensors",
    "model.layers.53.self_attn.q_proj.weight": "model-00009-of-00009.safetensors",
    "model.layers.53.self_attn.v_proj.weight": "model-00009-of-00009.safetensors",
    "model.layers.54.input_layernorm.weight": "model-00009-of-00009.safetensors",
    "model.layers.54.mlp.down_proj.weight": "model-00009-of-00009.safetensors",
    "model.layers.54.mlp.gate_proj.weight": "model-00009-of-00009.safetensors",
    "model.layers.54.mlp.up_proj.weight": "model-00009-of-00009.safetensors",
    "model.layers.54.post_attention_layernorm.weight": "model-00009-of-00009.safetensors",
    "model.layers.54.self_attn.k_proj.weight": "model-00009-of-00009.safetensors",
    "model.layers.54.self_attn.o_proj.weight": "model-00009-of-00009.safetensors",
    "model.layers.54.self_attn.q_proj.weight": "model-00009-of-00009.safetensors",
    "model.layers.54.self_attn.v_proj.weight": "model-00009-of-00009.safetensors",
    "model.layers.55.input_layernorm.weight": "model-00009-of-00009.safetensors",
    "model.layers.55.mlp.down_proj.weight": "model-00009-of-00009.safetensors",
    "model.layers.55.mlp.gate_proj.weight": "model-00009-of-00009.safetensors",
    "model.layers.55.mlp.up_proj.weight": "model-00009-of-00009.safetensors",
    "model.layers.55.post_attention_layernorm.weight": "model-00009-of-00009.safetensors",
    "model.layers.55.self_attn.k_proj.weight": "model-00009-of-00009.safetensors",
    "model.layers.55.self_attn.o_proj.weight": "model-00009-of-00009.safetensors",
    "model.layers.55.self_attn.q_proj.weight": "model-00009-of-00009.safetensors",
    "model.layers.55.self_attn.v_proj.weight": "model-00009-of-00009.safetensors",
    "model.layers.6.input_layernorm.weight": "model-00002-of-00009.safetensors",
    "model.layers.6.mlp.down_proj.weight": "model-00002-of-00009.safetensors",
    "model.layers.6.mlp.gate_proj.weight": "model-00002-of-00009.safetensors",
    "model.layers.6.mlp.up_proj.weight": "model-00002-of-00009.safetensors",
    "model.layers.6.post_attention_layernorm.weight": "model-00002-of-00009.safetensors",
    "model.layers.6.self_attn.k_proj.weight": "model-00002-of-00009.safetensors",
    "model.layers.6.self_attn.o_proj.weight": "model-00002-of-00009.safetensors",
    "model.layers.6.self_attn.q_proj.weight": "model-00002-of-00009.safetensors",
    "model.layers.6.self_attn.v_proj.weight": "model-00002-of-00009.safetensors",
    "model.layers.7.input_layernorm.weight": "model-00002-of-00009.safetensors",
    "model.layers.7.mlp.down_proj.weight": "model-00002-of-00009.safetensors",
    "model.layers.7.mlp.gate_proj.weight": "model-00002-of-00009.safetensors",
    "model.layers.7.mlp.up_proj.weight": "model-00002-of-00009.safetensors",
    "model.layers.7.post_attention_layernorm.weight": "model-00002-of-00009.safetensors",
    "model.layers.7.self_attn.k_proj.weight": "model-00002-of-00009.safetensors",
    "model.layers.7.self_attn.o_proj.weight": "model-00002-of-00009.safetensors",
    "model.layers.7.self_attn.q_proj.weight": "model-00002-of-00009.safetensors",
    "model.layers.7.self_attn.v_proj.weight": "model-00002-of-00009.safetensors",
    "model.layers.8.input_layernorm.weight": "model-00002-of-00009.safetensors",
    "model.layers.8.mlp.down_proj.weight": "model-00002-of-00009.safetensors",
    "model.layers.8.mlp.gate_proj.weight": "model-00002-of-00009.safetensors",
    "model.layers.8.mlp.up_proj.weight": "model-00002-of-00009.safetensors",
    "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00009.safetensors",
    "model.layers.8.self_attn.k_proj.weight": "model-00002-of-00009.safetensors",
    "model.layers.8.self_attn.o_proj.weight": "model-00002-of-00009.safetensors",
    "model.layers.8.self_attn.q_proj.weight": "model-00002-of-00009.safetensors",
    "model.layers.8.self_attn.v_proj.weight": "model-00002-of-00009.safetensors",
    "model.layers.9.input_layernorm.weight": "model-00002-of-00009.safetensors",
    "model.layers.9.mlp.down_proj.weight": "model-00002-of-00009.safetensors",
    "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00009.safetensors",
    "model.layers.9.mlp.up_proj.weight": "model-00002-of-00009.safetensors",
    "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00009.safetensors",
    "model.layers.9.self_attn.k_proj.weight": "model-00002-of-00009.safetensors",
    "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00009.safetensors",
    "model.layers.9.self_attn.q_proj.weight": "model-00002-of-00009.safetensors",
    "model.layers.9.self_attn.v_proj.weight": "model-00002-of-00009.safetensors",
    "model.norm.weight": "model-00009-of-00009.safetensors"
  }
}
11
params.json
Normal file
@@ -0,0 +1,11 @@
{
  "dim": 6144,
  "n_layers": 56,
  "head_dim": 128,
  "hidden_dim": 16384,
  "n_heads": 48,
  "n_kv_heads": 8,
  "norm_eps": 1e-05,
  "vocab_size": 32768,
  "rope_theta": 1000000.0
}
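As a sanity check, these shapes account exactly for the checkpoint size recorded in `model.safetensors.index.json`: 44,494,565,376 bytes at 2 bytes per bf16 parameter is 22,247,282,688 parameters. A sketch of that arithmetic (not part of the original files; derived only from the `params.json` values above):

```py
# Parameter count of Codestral-22B-v0.1, computed from params.json
dim, n_layers, head_dim = 6144, 56, 128
hidden_dim, n_heads, n_kv_heads, vocab = 16384, 48, 8, 32768

q = dim * (n_heads * head_dim)        # q_proj: 6144 x 6144
kv = dim * (n_kv_heads * head_dim)    # k_proj / v_proj: 6144 x 1024 each
o = (n_heads * head_dim) * dim        # o_proj: 6144 x 6144
mlp = 3 * dim * hidden_dim            # gate_proj, up_proj, down_proj
norms = 2 * dim                       # input + post-attention RMSNorm weights

per_layer = q + 2 * kv + o + mlp + norms
# + embeddings, untied lm_head, and the final norm
total = n_layers * per_layer + 2 * vocab * dim + dim

print(total)      # 22247282688
print(total * 2)  # 44494565376 bytes in bf16, matching the index metadata
```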
23
special_tokens_map.json
Normal file
@@ -0,0 +1,23 @@
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
98793
tokenizer.json
Normal file
File diff suppressed because it is too large
3
tokenizer.model
Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9addc8bdce5988448ae81b729336f43a81262160ae8da760674badab9d4c7d33
size 587591
BIN
tokenizer.model.v3
Normal file
Binary file not shown.
6187
tokenizer_config.json
Normal file
File diff suppressed because it is too large