初始化项目,由ModelHub XC社区提供模型

Model: MiniLLM/SFT-OPT-6.7B
Source: Original Platform
This commit is contained in:
ModelHub XC
2026-05-05 23:48:39 +08:00
commit 7b8e045569
13 changed files with 151068 additions and 0 deletions

35
.gitattributes vendored Normal file
View File

@@ -0,0 +1,35 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text

34
README.md Normal file
View File

@@ -0,0 +1,34 @@
---
license: apache-2.0
datasets:
- databricks/databricks-dolly-15k
language:
- en
metrics:
- rouge
base_model:
- facebook/opt-6.7b
pipeline_tag: text-generation
---
# SFT-OPT-6.7B
[paper](https://arxiv.org/abs/2306.08543) | [code](https://github.com/microsoft/LMOps/tree/main/minillm)
**SFT-OPT-6.7B** is an OPT-6.7B model supervised fine-tuned on [databricks-dolly-15k](https://huggingface.co/datasets/databricks/databricks-dolly-15k).
It is used as a baseline for [MiniLLM](https://huggingface.co/MiniLLM/MiniLLM-OPT-6.7B).
## Other Baselines
+ [KD](https://huggingface.co/MiniLLM/KD-OPT-6.7B)
+ [SeqKD](https://huggingface.co/MiniLLM/SeqKD-OPT-6.7B)
## Citation
```
@inproceedings{minillm,
title={MiniLLM: Knowledge Distillation of Large Language Models},
author={Gu, Yuxian and Dong, Li and Wei, Furu and Huang, Minlie},
booktitle={Proceedings of ICLR},
year={2024}
}
```

32
config.json Normal file
View File

@@ -0,0 +1,32 @@
{
"_name_or_path": "/data/yuxian/MiniLLM_dir/results/opt/train/sft/opt-6.7B",
"_remove_final_layer_norm": false,
"activation_dropout": 0.0,
"activation_function": "relu",
"architectures": [
"OPTForCausalLM"
],
"attention_dropout": 0.0,
"bos_token_id": 2,
"do_layer_norm_before": true,
"dropout": 0.1,
"enable_bias": true,
"eos_token_id": 2,
"ffn_dim": 16384,
"hidden_size": 4096,
"init_std": 0.02,
"is_model_parallel": true,
"layer_norm_elementwise_affine": true,
"layerdrop": 0.0,
"max_position_embeddings": 2048,
"model_type": "opt",
"num_attention_heads": 32,
"num_hidden_layers": 32,
"pad_token_id": 1,
"prefix": "</s>",
"torch_dtype": "float16",
"transformers_version": "4.42.0.dev0",
"use_cache": true,
"vocab_size": 50272,
"word_embed_proj_dim": 4096
}

7
generation_config.json Normal file
View File

@@ -0,0 +1,7 @@
{
"_from_model_config": true,
"bos_token_id": 2,
"eos_token_id": 2,
"pad_token_id": 1,
"transformers_version": "4.42.0.dev0"
}

50001
merges.txt Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:52ac6ad7d78917d426a66f59e49ebb19e4f99a4d89be24689c557276e9d971f8
size 4993326868

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8d9167d7f0ae104fd872629de6ebb9e91fcdb76ed9664be6b70439c645943bbb
size 4967433288

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:314253513b2ec925aac057274457b778ce53a500329e465dc675725bab7d22c3
size 3356364946

View File

@@ -0,0 +1,524 @@
{
"metadata": {
"total_size": 13316947968
},
"weight_map": {
"lm_head.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.embed_positions.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.embed_tokens.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.final_layer_norm.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.final_layer_norm.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.0.fc1.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.0.fc1.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.0.fc2.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.0.fc2.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.0.final_layer_norm.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.0.final_layer_norm.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.0.self_attn.k_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.0.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.0.self_attn.out_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.0.self_attn.out_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.0.self_attn.q_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.0.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.0.self_attn.v_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.0.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.0.self_attn_layer_norm.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.0.self_attn_layer_norm.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.1.fc1.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.1.fc1.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.1.fc2.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.1.fc2.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.1.final_layer_norm.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.1.final_layer_norm.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.1.self_attn.k_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.1.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.1.self_attn.out_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.1.self_attn.out_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.1.self_attn.q_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.1.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.1.self_attn.v_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.1.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.1.self_attn_layer_norm.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.1.self_attn_layer_norm.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.10.fc1.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.10.fc1.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.10.fc2.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.10.fc2.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.10.final_layer_norm.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.10.final_layer_norm.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.10.self_attn.k_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.10.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.10.self_attn.out_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.10.self_attn.out_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.10.self_attn.q_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.10.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.10.self_attn.v_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.10.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.10.self_attn_layer_norm.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.10.self_attn_layer_norm.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.11.fc1.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.11.fc1.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.11.fc2.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.11.fc2.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.11.final_layer_norm.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.11.final_layer_norm.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.11.self_attn.k_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.11.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.11.self_attn.out_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.11.self_attn.out_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.11.self_attn.q_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.11.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.11.self_attn.v_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.11.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.11.self_attn_layer_norm.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.11.self_attn_layer_norm.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.12.fc1.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.12.fc1.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.12.fc2.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.12.fc2.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.12.final_layer_norm.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.12.final_layer_norm.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.12.self_attn.k_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.12.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.12.self_attn.out_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.12.self_attn.out_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.12.self_attn.q_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.12.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.12.self_attn.v_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.12.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.12.self_attn_layer_norm.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.12.self_attn_layer_norm.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.13.fc1.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.13.fc1.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.13.fc2.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.13.fc2.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.13.final_layer_norm.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.13.final_layer_norm.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.13.self_attn.k_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.13.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.13.self_attn.out_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.13.self_attn.out_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.13.self_attn.q_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.13.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.13.self_attn.v_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.13.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.13.self_attn_layer_norm.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.13.self_attn_layer_norm.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.14.fc1.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.14.fc1.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.14.fc2.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.14.fc2.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.14.final_layer_norm.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.14.final_layer_norm.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.14.self_attn.k_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.14.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.14.self_attn.out_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.14.self_attn.out_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.14.self_attn.q_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.14.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.14.self_attn.v_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.14.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.14.self_attn_layer_norm.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.14.self_attn_layer_norm.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.15.fc1.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.15.fc1.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.15.fc2.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.15.fc2.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.15.final_layer_norm.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.15.final_layer_norm.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.15.self_attn.k_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.15.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.15.self_attn.out_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.15.self_attn.out_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.15.self_attn.q_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.15.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.15.self_attn.v_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.15.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.15.self_attn_layer_norm.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.15.self_attn_layer_norm.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.16.fc1.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.16.fc1.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.16.fc2.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.16.fc2.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.16.final_layer_norm.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.16.final_layer_norm.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.16.self_attn.k_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.16.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.16.self_attn.out_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.16.self_attn.out_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.16.self_attn.q_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.16.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.16.self_attn.v_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.16.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.16.self_attn_layer_norm.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.16.self_attn_layer_norm.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.17.fc1.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.17.fc1.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.17.fc2.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.17.fc2.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.17.final_layer_norm.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.17.final_layer_norm.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.17.self_attn.k_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.17.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.17.self_attn.out_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.17.self_attn.out_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.17.self_attn.q_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.17.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.17.self_attn.v_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.17.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.17.self_attn_layer_norm.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.17.self_attn_layer_norm.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.18.fc1.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.18.fc1.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.18.fc2.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.18.fc2.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.18.final_layer_norm.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.18.final_layer_norm.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.18.self_attn.k_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.18.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.18.self_attn.out_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.18.self_attn.out_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.18.self_attn.q_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.18.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.18.self_attn.v_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.18.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.18.self_attn_layer_norm.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.18.self_attn_layer_norm.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.19.fc1.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.19.fc1.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.19.fc2.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.19.fc2.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.19.final_layer_norm.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.19.final_layer_norm.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.19.self_attn.k_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.19.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.19.self_attn.out_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.19.self_attn.out_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.19.self_attn.q_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.19.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.19.self_attn.v_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.19.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.19.self_attn_layer_norm.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.19.self_attn_layer_norm.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.2.fc1.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.2.fc1.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.2.fc2.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.2.fc2.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.2.final_layer_norm.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.2.final_layer_norm.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.2.self_attn.k_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.2.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.2.self_attn.out_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.2.self_attn.out_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.2.self_attn.q_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.2.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.2.self_attn.v_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.2.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.2.self_attn_layer_norm.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.2.self_attn_layer_norm.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.20.fc1.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.20.fc1.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.20.fc2.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.20.fc2.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.20.final_layer_norm.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.20.final_layer_norm.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.20.self_attn.k_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.20.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.20.self_attn.out_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.20.self_attn.out_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.20.self_attn.q_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.20.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.20.self_attn.v_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.20.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.20.self_attn_layer_norm.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.20.self_attn_layer_norm.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.21.fc1.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.21.fc1.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.21.fc2.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.21.fc2.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.21.final_layer_norm.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.21.final_layer_norm.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.21.self_attn.k_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.21.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.21.self_attn.out_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.21.self_attn.out_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.21.self_attn.q_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.21.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.21.self_attn.v_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.21.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.21.self_attn_layer_norm.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.21.self_attn_layer_norm.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.22.fc1.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.22.fc1.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.22.fc2.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.22.fc2.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.22.final_layer_norm.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.22.final_layer_norm.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.22.self_attn.k_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.22.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.22.self_attn.out_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.22.self_attn.out_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.22.self_attn.q_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.22.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.22.self_attn.v_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.22.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.22.self_attn_layer_norm.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.22.self_attn_layer_norm.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.23.fc1.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.23.fc1.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.23.fc2.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.23.fc2.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.23.final_layer_norm.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.23.final_layer_norm.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.23.self_attn.k_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.23.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.23.self_attn.out_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.23.self_attn.out_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.23.self_attn.q_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.23.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.23.self_attn.v_proj.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.23.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.23.self_attn_layer_norm.bias": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.23.self_attn_layer_norm.weight": "pytorch_model-00002-of-00003.bin",
"model.decoder.layers.24.fc1.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.24.fc1.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.24.fc2.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.24.fc2.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.24.final_layer_norm.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.24.final_layer_norm.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.24.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.24.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.24.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.24.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.24.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.24.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.24.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.24.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.24.self_attn_layer_norm.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.24.self_attn_layer_norm.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.25.fc1.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.25.fc1.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.25.fc2.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.25.fc2.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.25.final_layer_norm.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.25.final_layer_norm.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.25.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.25.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.25.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.25.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.25.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.25.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.25.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.25.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.25.self_attn_layer_norm.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.25.self_attn_layer_norm.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.26.fc1.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.26.fc1.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.26.fc2.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.26.fc2.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.26.final_layer_norm.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.26.final_layer_norm.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.26.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.26.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.26.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.26.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.26.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.26.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.26.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.26.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.26.self_attn_layer_norm.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.26.self_attn_layer_norm.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.27.fc1.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.27.fc1.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.27.fc2.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.27.fc2.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.27.final_layer_norm.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.27.final_layer_norm.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.27.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.27.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.27.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.27.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.27.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.27.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.27.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.27.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.27.self_attn_layer_norm.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.27.self_attn_layer_norm.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.28.fc1.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.28.fc1.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.28.fc2.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.28.fc2.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.28.final_layer_norm.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.28.final_layer_norm.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.28.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.28.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.28.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.28.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.28.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.28.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.28.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.28.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.28.self_attn_layer_norm.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.28.self_attn_layer_norm.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.29.fc1.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.29.fc1.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.29.fc2.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.29.fc2.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.29.final_layer_norm.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.29.final_layer_norm.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.29.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.29.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.29.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.29.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.29.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.29.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.29.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.29.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.29.self_attn_layer_norm.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.29.self_attn_layer_norm.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.3.fc1.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.3.fc1.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.3.fc2.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.3.fc2.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.3.final_layer_norm.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.3.final_layer_norm.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.3.self_attn.k_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.3.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.3.self_attn.out_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.3.self_attn.out_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.3.self_attn.q_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.3.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.3.self_attn.v_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.3.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.3.self_attn_layer_norm.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.3.self_attn_layer_norm.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.30.fc1.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.30.fc1.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.30.fc2.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.30.fc2.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.30.final_layer_norm.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.30.final_layer_norm.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.30.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.30.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.30.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.30.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.30.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.30.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.30.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.30.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.30.self_attn_layer_norm.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.30.self_attn_layer_norm.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.31.fc1.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.31.fc1.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.31.fc2.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.31.fc2.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.31.final_layer_norm.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.31.final_layer_norm.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.31.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.31.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.31.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.31.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.31.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.31.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.31.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.31.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.31.self_attn_layer_norm.bias": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.31.self_attn_layer_norm.weight": "pytorch_model-00003-of-00003.bin",
"model.decoder.layers.4.fc1.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.4.fc1.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.4.fc2.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.4.fc2.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.4.final_layer_norm.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.4.final_layer_norm.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.4.self_attn.k_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.4.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.4.self_attn.out_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.4.self_attn.out_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.4.self_attn.q_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.4.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.4.self_attn.v_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.4.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.4.self_attn_layer_norm.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.4.self_attn_layer_norm.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.5.fc1.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.5.fc1.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.5.fc2.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.5.fc2.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.5.final_layer_norm.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.5.final_layer_norm.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.5.self_attn.k_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.5.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.5.self_attn.out_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.5.self_attn.out_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.5.self_attn.q_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.5.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.5.self_attn.v_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.5.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.5.self_attn_layer_norm.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.5.self_attn_layer_norm.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.6.fc1.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.6.fc1.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.6.fc2.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.6.fc2.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.6.final_layer_norm.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.6.final_layer_norm.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.6.self_attn.k_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.6.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.6.self_attn.out_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.6.self_attn.out_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.6.self_attn.q_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.6.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.6.self_attn.v_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.6.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.6.self_attn_layer_norm.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.6.self_attn_layer_norm.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.7.fc1.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.7.fc1.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.7.fc2.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.7.fc2.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.7.final_layer_norm.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.7.final_layer_norm.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.7.self_attn.k_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.7.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.7.self_attn.out_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.7.self_attn.out_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.7.self_attn.q_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.7.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.7.self_attn.v_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.7.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.7.self_attn_layer_norm.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.7.self_attn_layer_norm.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.8.fc1.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.8.fc1.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.8.fc2.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.8.fc2.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.8.final_layer_norm.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.8.final_layer_norm.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.8.self_attn.k_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.8.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.8.self_attn.out_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.8.self_attn.out_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.8.self_attn.q_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.8.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.8.self_attn.v_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.8.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.8.self_attn_layer_norm.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.8.self_attn_layer_norm.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.9.fc1.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.9.fc1.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.9.fc2.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.9.fc2.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.9.final_layer_norm.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.9.final_layer_norm.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.9.self_attn.k_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.9.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.9.self_attn.out_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.9.self_attn.out_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.9.self_attn.q_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.9.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.9.self_attn.v_proj.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.9.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.9.self_attn_layer_norm.bias": "pytorch_model-00001-of-00003.bin",
"model.decoder.layers.9.self_attn_layer_norm.weight": "pytorch_model-00001-of-00003.bin"
}
}

30
special_tokens_map.json Normal file
View File

@@ -0,0 +1,30 @@
{
"bos_token": {
"content": "</s>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false
},
"eos_token": {
"content": "</s>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false
},
"pad_token": {
"content": "</s>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false
},
"unk_token": {
"content": "</s>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false
}
}

100365
tokenizer.json Normal file

File diff suppressed because it is too large Load Diff

30
tokenizer_config.json Normal file
View File

@@ -0,0 +1,30 @@
{
"add_bos_token": true,
"add_prefix_space": false,
"added_tokens_decoder": {
"1": {
"content": "<pad>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false,
"special": true
},
"2": {
"content": "</s>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false,
"special": true
}
},
"bos_token": "</s>",
"clean_up_tokenization_spaces": true,
"eos_token": "</s>",
"errors": "replace",
"model_max_length": 1000000000000000019884624838656,
"pad_token": "</s>",
"tokenizer_class": "GPT2Tokenizer",
"unk_token": "</s>"
}

1
vocab.json Normal file

File diff suppressed because one or more lines are too long