Initialize project; model provided by the ModelHub XC community

Model: QuixiAI/WizardLM-Uncensored-Falcon-40b
Source: Original Platform
ModelHub XC
2026-04-21 17:32:57 +08:00
commit 6bdd8d8b13
23 changed files with 133051 additions and 0 deletions

34
.gitattributes vendored Normal file

@@ -0,0 +1,34 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
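
The rules above route large binary artifacts through Git LFS. A minimal sketch (not repo code) approximating which paths they capture, using `fnmatch` on the basename as a stand-in for git's richer wildmatch (the `saved_model/**/*` rule needs `**` support and is omitted for that reason):

```
from fnmatch import fnmatch
from pathlib import PurePosixPath

# A few of the patterns from the .gitattributes above.
LFS_PATTERNS = ["*.bin", "*.safetensors", "*.pt", "*.pth", "*.zip", "*tfevents*"]

def routed_through_lfs(path: str) -> bool:
    # gitattributes basename patterns match at any directory depth
    name = PurePosixPath(path).name
    return any(fnmatch(name, pattern) for pattern in LFS_PATTERNS)

assert routed_through_lfs("pytorch_model-00001-of-00009.bin")
assert not routed_through_lfs("config.json")
```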

20
README.md Normal file

@@ -0,0 +1,20 @@
---
license: apache-2.0
---
This is WizardLM trained on top of tiiuae/falcon-40b, with a subset of the dataset - responses that contained alignment / moralizing were removed. The intent is to train a WizardLM that doesn't have alignment built-in, so that alignment (of any sort) can be added separately, for example with an RLHF LoRA.
Shout out to the open source AI/ML community, and everyone who helped me out.
Note:
An uncensored model has no guardrails.
You are responsible for anything you do with the model, just as you are responsible for anything you do with any dangerous object such as a knife, gun, lighter, or car. Publishing anything this model generates is the same as publishing it yourself. You are responsible for the content you publish, and you cannot blame the model any more than you can blame the knife, gun, lighter, or car for what you do with it.
Prompt format is WizardLM.
```
What is a falcon? Can I keep one as a pet?
### Response:
```
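
A minimal loading-and-generation sketch, assuming enough GPU memory for the fp16 weights and `accelerate` installed for `device_map="auto"`; `trust_remote_code=True` is needed because the model class ships with this repo:

```
from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = "QuixiAI/WizardLM-Uncensored-Falcon-40b"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    trust_remote_code=True,  # model code lives in this repo (modelling_RW.py)
    torch_dtype="auto",
    device_map="auto",
)

# WizardLM format: the prompt, then a "### Response:" header.
prompt = "What is a falcon? Can I keep one as a pet?\n### Response:"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=200)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```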
Thank you [chirper.ai](https://chirper.ai) for sponsoring some of my compute!

29
config.json Normal file

@@ -0,0 +1,29 @@
{
"_name_or_path": "/workspace/WizardLM-Uncensored-falcon-40b",
"alibi": false,
"apply_residual_connection_post_layernorm": false,
"architectures": [
"RWForCausalLM"
],
"attention_dropout": 0.0,
"auto_map": {
"AutoConfig": "configuration_RW.RWConfig",
"AutoModelForCausalLM": "modelling_RW.RWForCausalLM"
},
"bias": false,
"bos_token_id": 1,
"eos_token_id": 2,
"hidden_dropout": 0.0,
"hidden_size": 8192,
"initializer_range": 0.02,
"layer_norm_epsilon": 1e-05,
"model_type": "RefinedWeb",
"n_head": 128,
"n_head_kv": 8,
"n_layer": 60,
"parallel_attn": true,
"torch_dtype": "float16",
"transformers_version": "4.28.1",
"use_cache": true,
"vocab_size": 65025
}
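
The attention geometry these values imply, as plain arithmetic (a quick check, assuming the fused QKV layout used by modelling_RW.py, where n_head query heads share n_head_kv key/value heads):

```
hidden_size, n_head, n_head_kv = 8192, 128, 8

head_dim = hidden_size // n_head                  # 64
queries_per_kv = n_head // n_head_kv              # 16 query heads per KV head (grouped attention)
qkv_width = (n_head + 2 * n_head_kv) * head_dim   # 9216: fused query_key_value output width
print(head_dim, queries_per_kv, qkv_width)
```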

75
configuration_RW.py Normal file

@@ -0,0 +1,75 @@
# coding=utf-8
# Copyright 2022 the Big Science Workshop and HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Bloom configuration"""
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging
logger = logging.get_logger(__name__)
class RWConfig(PretrainedConfig):
model_type = "RefinedWeb"
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {
"num_hidden_layers": "n_layer",
"num_attention_heads": "n_head",
}
def __init__(
self,
vocab_size=250880,
hidden_size=64,
n_layer=2,
n_head=8,
layer_norm_epsilon=1e-5,
initializer_range=0.02,
use_cache=True,
bos_token_id=1,
eos_token_id=2,
apply_residual_connection_post_layernorm=False,
hidden_dropout=0.0,
attention_dropout=0.0,
n_head_kv=None,
alibi=False,
**kwargs,
):
self.vocab_size = vocab_size
# Backward compatibility with n_embed kwarg
n_embed = kwargs.pop("n_embed", None)
self.hidden_size = hidden_size if n_embed is None else n_embed
self.n_layer = n_layer
self.n_head = n_head
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.use_cache = use_cache
self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
self.hidden_dropout = hidden_dropout
self.attention_dropout = attention_dropout
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
self.n_head_kv = n_head if n_head_kv is None else n_head_kv
self.alibi = alibi
super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
@property
def head_dim(self):
return self.hidden_size // self.n_head
@property
def rotary(self):
return not self.alibi
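
A small usage sketch, assuming configuration_RW.py is importable from a local checkout, instantiating RWConfig with this checkpoint's values:

```
from configuration_RW import RWConfig

cfg = RWConfig(vocab_size=65025, hidden_size=8192, n_layer=60,
               n_head=128, n_head_kv=8, alibi=False)
assert cfg.head_dim == 64   # 8192 // 128
assert cfg.rotary           # rotary position embeddings, since alibi is off
```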

6
generation_config.json Normal file

@@ -0,0 +1,6 @@
{
"_from_model_config": true,
"bos_token_id": 1,
"eos_token_id": 2,
"transformers_version": "4.28.1"
}

1
latest Normal file

@@ -0,0 +1 @@
global_step214

1106
modelling_RW.py Normal file

File diff suppressed because it is too large

3
pytorch_model-00001-of-00009.bin Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e97cade97d491c63b4ca25ea42643a1f82eebb4449de557d00ee16fa2998813e
size 9504786525
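
This three-line block (and the eight that follow) is a Git LFS pointer: the weight shard itself lives in LFS storage, addressed by its SHA-256. A sketch of a hypothetical parser for the pointer format:

```
def parse_lfs_pointer(text: str) -> dict:
    # Pointer files are "key value" lines: version, oid sha256:<hex>, size <bytes>.
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    algo, digest = fields["oid"].split(":", 1)
    return {"version": fields["version"], "algo": algo,
            "oid": digest, "size": int(fields["size"])}

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:e97cade97d491c63b4ca25ea42643a1f82eebb4449de557d00ee16fa2998813e
size 9504786525
"""
info = parse_lfs_pointer(pointer)
assert info["size"] == 9_504_786_525   # ~9.5 GB shard
```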

3
pytorch_model-00002-of-00009.bin Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:92a62a1a4e93feac55014272c1bfc6dcc845fbe3f3241745f69fdda0b458ed91
size 9513159151

3
pytorch_model-00003-of-00009.bin Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:249f63046b72a2469ecc5678752e2bff5b8a0d43f559f90778b545f364c27d9b
size 9513159151

3
pytorch_model-00004-of-00009.bin Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7fda8200f0b5c1358f1461d31402e535783d418cfa221d92451ec8b3f51c769c
size 9513159151

3
pytorch_model-00005-of-00009.bin Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:741baf6c6473f8254c24979c86b3dfd7eca78d44616e2be5745269fd13f84def
size 9513159151

3
pytorch_model-00006-of-00009.bin Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f1897444e5dc96776f98393fa608f3641d1386b20ad4910354e53e7406ad7cb9
size 9513159151

3
pytorch_model-00007-of-00009.bin Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c22e7bd5c9669c800e0f28866e7a16679b4b5a3024da51bd660c81e9a1809a25
size 9513159151

3
pytorch_model-00008-of-00009.bin Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1a77d8b0d29c0d7b86adc64a624dd51ba873338e2c1ec77521461ff601315c15
size 9513159151

3
pytorch_model-00009-of-00009.bin Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fd228eaed363e85a0134f29147a579a4336b0572955e3c673ae4bef7196d81d2
size 7575236661

491
pytorch_model.bin.index.json Normal file

@@ -0,0 +1,491 @@
{
"metadata": {
"total_size": 83671973888
},
"weight_map": {
"lm_head.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.0.ln_attn.bias": "pytorch_model-00001-of-00009.bin",
"transformer.h.0.ln_attn.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.0.ln_mlp.bias": "pytorch_model-00001-of-00009.bin",
"transformer.h.0.ln_mlp.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.0.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.0.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.0.self_attention.dense.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.0.self_attention.query_key_value.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.1.ln_attn.bias": "pytorch_model-00001-of-00009.bin",
"transformer.h.1.ln_attn.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.1.ln_mlp.bias": "pytorch_model-00001-of-00009.bin",
"transformer.h.1.ln_mlp.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.1.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.1.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.1.self_attention.dense.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.1.self_attention.query_key_value.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.10.ln_attn.bias": "pytorch_model-00002-of-00009.bin",
"transformer.h.10.ln_attn.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.10.ln_mlp.bias": "pytorch_model-00002-of-00009.bin",
"transformer.h.10.ln_mlp.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.10.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.10.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.10.self_attention.dense.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.10.self_attention.query_key_value.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.11.ln_attn.bias": "pytorch_model-00002-of-00009.bin",
"transformer.h.11.ln_attn.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.11.ln_mlp.bias": "pytorch_model-00002-of-00009.bin",
"transformer.h.11.ln_mlp.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.11.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.11.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.11.self_attention.dense.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.11.self_attention.query_key_value.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.12.ln_attn.bias": "pytorch_model-00002-of-00009.bin",
"transformer.h.12.ln_attn.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.12.ln_mlp.bias": "pytorch_model-00002-of-00009.bin",
"transformer.h.12.ln_mlp.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.12.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.12.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.12.self_attention.dense.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.12.self_attention.query_key_value.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.13.ln_attn.bias": "pytorch_model-00002-of-00009.bin",
"transformer.h.13.ln_attn.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.13.ln_mlp.bias": "pytorch_model-00002-of-00009.bin",
"transformer.h.13.ln_mlp.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.13.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.13.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.13.self_attention.dense.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.13.self_attention.query_key_value.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.14.ln_attn.bias": "pytorch_model-00003-of-00009.bin",
"transformer.h.14.ln_attn.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.14.ln_mlp.bias": "pytorch_model-00003-of-00009.bin",
"transformer.h.14.ln_mlp.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.14.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.14.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.14.self_attention.dense.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.14.self_attention.query_key_value.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.15.ln_attn.bias": "pytorch_model-00003-of-00009.bin",
"transformer.h.15.ln_attn.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.15.ln_mlp.bias": "pytorch_model-00003-of-00009.bin",
"transformer.h.15.ln_mlp.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.15.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.15.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.15.self_attention.dense.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.15.self_attention.query_key_value.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.16.ln_attn.bias": "pytorch_model-00003-of-00009.bin",
"transformer.h.16.ln_attn.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.16.ln_mlp.bias": "pytorch_model-00003-of-00009.bin",
"transformer.h.16.ln_mlp.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.16.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.16.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.16.self_attention.dense.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.16.self_attention.query_key_value.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.17.ln_attn.bias": "pytorch_model-00003-of-00009.bin",
"transformer.h.17.ln_attn.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.17.ln_mlp.bias": "pytorch_model-00003-of-00009.bin",
"transformer.h.17.ln_mlp.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.17.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.17.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.17.self_attention.dense.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.17.self_attention.query_key_value.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.18.ln_attn.bias": "pytorch_model-00003-of-00009.bin",
"transformer.h.18.ln_attn.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.18.ln_mlp.bias": "pytorch_model-00003-of-00009.bin",
"transformer.h.18.ln_mlp.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.18.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.18.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.18.self_attention.dense.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.18.self_attention.query_key_value.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.19.ln_attn.bias": "pytorch_model-00003-of-00009.bin",
"transformer.h.19.ln_attn.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.19.ln_mlp.bias": "pytorch_model-00003-of-00009.bin",
"transformer.h.19.ln_mlp.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.19.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.19.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.19.self_attention.dense.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.19.self_attention.query_key_value.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.2.ln_attn.bias": "pytorch_model-00001-of-00009.bin",
"transformer.h.2.ln_attn.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.2.ln_mlp.bias": "pytorch_model-00001-of-00009.bin",
"transformer.h.2.ln_mlp.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.2.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.2.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.2.self_attention.dense.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.2.self_attention.query_key_value.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.20.ln_attn.bias": "pytorch_model-00003-of-00009.bin",
"transformer.h.20.ln_attn.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.20.ln_mlp.bias": "pytorch_model-00003-of-00009.bin",
"transformer.h.20.ln_mlp.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.20.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.20.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.20.self_attention.dense.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.20.self_attention.query_key_value.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.21.ln_attn.bias": "pytorch_model-00004-of-00009.bin",
"transformer.h.21.ln_attn.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.21.ln_mlp.bias": "pytorch_model-00004-of-00009.bin",
"transformer.h.21.ln_mlp.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.21.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.21.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.21.self_attention.dense.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.21.self_attention.query_key_value.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.22.ln_attn.bias": "pytorch_model-00004-of-00009.bin",
"transformer.h.22.ln_attn.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.22.ln_mlp.bias": "pytorch_model-00004-of-00009.bin",
"transformer.h.22.ln_mlp.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.22.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.22.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.22.self_attention.dense.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.22.self_attention.query_key_value.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.23.ln_attn.bias": "pytorch_model-00004-of-00009.bin",
"transformer.h.23.ln_attn.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.23.ln_mlp.bias": "pytorch_model-00004-of-00009.bin",
"transformer.h.23.ln_mlp.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.23.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.23.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.23.self_attention.dense.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.23.self_attention.query_key_value.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.24.ln_attn.bias": "pytorch_model-00004-of-00009.bin",
"transformer.h.24.ln_attn.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.24.ln_mlp.bias": "pytorch_model-00004-of-00009.bin",
"transformer.h.24.ln_mlp.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.24.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.24.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.24.self_attention.dense.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.24.self_attention.query_key_value.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.25.ln_attn.bias": "pytorch_model-00004-of-00009.bin",
"transformer.h.25.ln_attn.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.25.ln_mlp.bias": "pytorch_model-00004-of-00009.bin",
"transformer.h.25.ln_mlp.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.25.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.25.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.25.self_attention.dense.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.25.self_attention.query_key_value.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.26.ln_attn.bias": "pytorch_model-00004-of-00009.bin",
"transformer.h.26.ln_attn.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.26.ln_mlp.bias": "pytorch_model-00004-of-00009.bin",
"transformer.h.26.ln_mlp.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.26.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.26.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.26.self_attention.dense.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.26.self_attention.query_key_value.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.27.ln_attn.bias": "pytorch_model-00004-of-00009.bin",
"transformer.h.27.ln_attn.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.27.ln_mlp.bias": "pytorch_model-00004-of-00009.bin",
"transformer.h.27.ln_mlp.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.27.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.27.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.27.self_attention.dense.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.27.self_attention.query_key_value.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.28.ln_attn.bias": "pytorch_model-00005-of-00009.bin",
"transformer.h.28.ln_attn.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.28.ln_mlp.bias": "pytorch_model-00005-of-00009.bin",
"transformer.h.28.ln_mlp.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.28.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.28.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.28.self_attention.dense.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.28.self_attention.query_key_value.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.29.ln_attn.bias": "pytorch_model-00005-of-00009.bin",
"transformer.h.29.ln_attn.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.29.ln_mlp.bias": "pytorch_model-00005-of-00009.bin",
"transformer.h.29.ln_mlp.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.29.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.29.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.29.self_attention.dense.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.29.self_attention.query_key_value.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.3.ln_attn.bias": "pytorch_model-00001-of-00009.bin",
"transformer.h.3.ln_attn.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.3.ln_mlp.bias": "pytorch_model-00001-of-00009.bin",
"transformer.h.3.ln_mlp.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.3.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.3.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.3.self_attention.dense.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.3.self_attention.query_key_value.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.30.ln_attn.bias": "pytorch_model-00005-of-00009.bin",
"transformer.h.30.ln_attn.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.30.ln_mlp.bias": "pytorch_model-00005-of-00009.bin",
"transformer.h.30.ln_mlp.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.30.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.30.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.30.self_attention.dense.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.30.self_attention.query_key_value.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.31.ln_attn.bias": "pytorch_model-00005-of-00009.bin",
"transformer.h.31.ln_attn.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.31.ln_mlp.bias": "pytorch_model-00005-of-00009.bin",
"transformer.h.31.ln_mlp.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.31.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.31.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.31.self_attention.dense.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.31.self_attention.query_key_value.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.32.ln_attn.bias": "pytorch_model-00005-of-00009.bin",
"transformer.h.32.ln_attn.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.32.ln_mlp.bias": "pytorch_model-00005-of-00009.bin",
"transformer.h.32.ln_mlp.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.32.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.32.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.32.self_attention.dense.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.32.self_attention.query_key_value.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.33.ln_attn.bias": "pytorch_model-00005-of-00009.bin",
"transformer.h.33.ln_attn.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.33.ln_mlp.bias": "pytorch_model-00005-of-00009.bin",
"transformer.h.33.ln_mlp.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.33.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.33.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.33.self_attention.dense.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.33.self_attention.query_key_value.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.34.ln_attn.bias": "pytorch_model-00005-of-00009.bin",
"transformer.h.34.ln_attn.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.34.ln_mlp.bias": "pytorch_model-00005-of-00009.bin",
"transformer.h.34.ln_mlp.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.34.mlp.dense_4h_to_h.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.34.mlp.dense_h_to_4h.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.34.self_attention.dense.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.34.self_attention.query_key_value.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.35.ln_attn.bias": "pytorch_model-00006-of-00009.bin",
"transformer.h.35.ln_attn.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.35.ln_mlp.bias": "pytorch_model-00006-of-00009.bin",
"transformer.h.35.ln_mlp.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.35.mlp.dense_4h_to_h.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.35.mlp.dense_h_to_4h.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.35.self_attention.dense.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.35.self_attention.query_key_value.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.36.ln_attn.bias": "pytorch_model-00006-of-00009.bin",
"transformer.h.36.ln_attn.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.36.ln_mlp.bias": "pytorch_model-00006-of-00009.bin",
"transformer.h.36.ln_mlp.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.36.mlp.dense_4h_to_h.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.36.mlp.dense_h_to_4h.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.36.self_attention.dense.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.36.self_attention.query_key_value.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.37.ln_attn.bias": "pytorch_model-00006-of-00009.bin",
"transformer.h.37.ln_attn.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.37.ln_mlp.bias": "pytorch_model-00006-of-00009.bin",
"transformer.h.37.ln_mlp.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.37.mlp.dense_4h_to_h.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.37.mlp.dense_h_to_4h.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.37.self_attention.dense.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.37.self_attention.query_key_value.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.38.ln_attn.bias": "pytorch_model-00006-of-00009.bin",
"transformer.h.38.ln_attn.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.38.ln_mlp.bias": "pytorch_model-00006-of-00009.bin",
"transformer.h.38.ln_mlp.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.38.mlp.dense_4h_to_h.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.38.mlp.dense_h_to_4h.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.38.self_attention.dense.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.38.self_attention.query_key_value.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.39.ln_attn.bias": "pytorch_model-00006-of-00009.bin",
"transformer.h.39.ln_attn.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.39.ln_mlp.bias": "pytorch_model-00006-of-00009.bin",
"transformer.h.39.ln_mlp.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.39.mlp.dense_4h_to_h.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.39.mlp.dense_h_to_4h.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.39.self_attention.dense.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.39.self_attention.query_key_value.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.4.ln_attn.bias": "pytorch_model-00001-of-00009.bin",
"transformer.h.4.ln_attn.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.4.ln_mlp.bias": "pytorch_model-00001-of-00009.bin",
"transformer.h.4.ln_mlp.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.4.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.4.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.4.self_attention.dense.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.4.self_attention.query_key_value.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.40.ln_attn.bias": "pytorch_model-00006-of-00009.bin",
"transformer.h.40.ln_attn.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.40.ln_mlp.bias": "pytorch_model-00006-of-00009.bin",
"transformer.h.40.ln_mlp.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.40.mlp.dense_4h_to_h.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.40.mlp.dense_h_to_4h.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.40.self_attention.dense.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.40.self_attention.query_key_value.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.41.ln_attn.bias": "pytorch_model-00006-of-00009.bin",
"transformer.h.41.ln_attn.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.41.ln_mlp.bias": "pytorch_model-00006-of-00009.bin",
"transformer.h.41.ln_mlp.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.41.mlp.dense_4h_to_h.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.41.mlp.dense_h_to_4h.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.41.self_attention.dense.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.41.self_attention.query_key_value.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.42.ln_attn.bias": "pytorch_model-00007-of-00009.bin",
"transformer.h.42.ln_attn.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.42.ln_mlp.bias": "pytorch_model-00007-of-00009.bin",
"transformer.h.42.ln_mlp.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.42.mlp.dense_4h_to_h.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.42.mlp.dense_h_to_4h.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.42.self_attention.dense.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.42.self_attention.query_key_value.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.43.ln_attn.bias": "pytorch_model-00007-of-00009.bin",
"transformer.h.43.ln_attn.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.43.ln_mlp.bias": "pytorch_model-00007-of-00009.bin",
"transformer.h.43.ln_mlp.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.43.mlp.dense_4h_to_h.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.43.mlp.dense_h_to_4h.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.43.self_attention.dense.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.43.self_attention.query_key_value.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.44.ln_attn.bias": "pytorch_model-00007-of-00009.bin",
"transformer.h.44.ln_attn.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.44.ln_mlp.bias": "pytorch_model-00007-of-00009.bin",
"transformer.h.44.ln_mlp.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.44.mlp.dense_4h_to_h.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.44.mlp.dense_h_to_4h.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.44.self_attention.dense.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.44.self_attention.query_key_value.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.45.ln_attn.bias": "pytorch_model-00007-of-00009.bin",
"transformer.h.45.ln_attn.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.45.ln_mlp.bias": "pytorch_model-00007-of-00009.bin",
"transformer.h.45.ln_mlp.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.45.mlp.dense_4h_to_h.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.45.mlp.dense_h_to_4h.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.45.self_attention.dense.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.45.self_attention.query_key_value.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.46.ln_attn.bias": "pytorch_model-00007-of-00009.bin",
"transformer.h.46.ln_attn.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.46.ln_mlp.bias": "pytorch_model-00007-of-00009.bin",
"transformer.h.46.ln_mlp.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.46.mlp.dense_4h_to_h.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.46.mlp.dense_h_to_4h.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.46.self_attention.dense.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.46.self_attention.query_key_value.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.47.ln_attn.bias": "pytorch_model-00007-of-00009.bin",
"transformer.h.47.ln_attn.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.47.ln_mlp.bias": "pytorch_model-00007-of-00009.bin",
"transformer.h.47.ln_mlp.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.47.mlp.dense_4h_to_h.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.47.mlp.dense_h_to_4h.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.47.self_attention.dense.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.47.self_attention.query_key_value.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.48.ln_attn.bias": "pytorch_model-00007-of-00009.bin",
"transformer.h.48.ln_attn.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.48.ln_mlp.bias": "pytorch_model-00007-of-00009.bin",
"transformer.h.48.ln_mlp.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.48.mlp.dense_4h_to_h.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.48.mlp.dense_h_to_4h.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.48.self_attention.dense.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.48.self_attention.query_key_value.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.49.ln_attn.bias": "pytorch_model-00008-of-00009.bin",
"transformer.h.49.ln_attn.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.49.ln_mlp.bias": "pytorch_model-00008-of-00009.bin",
"transformer.h.49.ln_mlp.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.49.mlp.dense_4h_to_h.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.49.mlp.dense_h_to_4h.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.49.self_attention.dense.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.49.self_attention.query_key_value.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.5.ln_attn.bias": "pytorch_model-00001-of-00009.bin",
"transformer.h.5.ln_attn.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.5.ln_mlp.bias": "pytorch_model-00001-of-00009.bin",
"transformer.h.5.ln_mlp.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.5.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.5.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.5.self_attention.dense.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.5.self_attention.query_key_value.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.50.ln_attn.bias": "pytorch_model-00008-of-00009.bin",
"transformer.h.50.ln_attn.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.50.ln_mlp.bias": "pytorch_model-00008-of-00009.bin",
"transformer.h.50.ln_mlp.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.50.mlp.dense_4h_to_h.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.50.mlp.dense_h_to_4h.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.50.self_attention.dense.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.50.self_attention.query_key_value.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.51.ln_attn.bias": "pytorch_model-00008-of-00009.bin",
"transformer.h.51.ln_attn.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.51.ln_mlp.bias": "pytorch_model-00008-of-00009.bin",
"transformer.h.51.ln_mlp.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.51.mlp.dense_4h_to_h.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.51.mlp.dense_h_to_4h.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.51.self_attention.dense.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.51.self_attention.query_key_value.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.52.ln_attn.bias": "pytorch_model-00008-of-00009.bin",
"transformer.h.52.ln_attn.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.52.ln_mlp.bias": "pytorch_model-00008-of-00009.bin",
"transformer.h.52.ln_mlp.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.52.mlp.dense_4h_to_h.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.52.mlp.dense_h_to_4h.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.52.self_attention.dense.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.52.self_attention.query_key_value.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.53.ln_attn.bias": "pytorch_model-00008-of-00009.bin",
"transformer.h.53.ln_attn.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.53.ln_mlp.bias": "pytorch_model-00008-of-00009.bin",
"transformer.h.53.ln_mlp.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.53.mlp.dense_4h_to_h.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.53.mlp.dense_h_to_4h.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.53.self_attention.dense.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.53.self_attention.query_key_value.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.54.ln_attn.bias": "pytorch_model-00008-of-00009.bin",
"transformer.h.54.ln_attn.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.54.ln_mlp.bias": "pytorch_model-00008-of-00009.bin",
"transformer.h.54.ln_mlp.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.54.mlp.dense_4h_to_h.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.54.mlp.dense_h_to_4h.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.54.self_attention.dense.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.54.self_attention.query_key_value.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.55.ln_attn.bias": "pytorch_model-00008-of-00009.bin",
"transformer.h.55.ln_attn.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.55.ln_mlp.bias": "pytorch_model-00008-of-00009.bin",
"transformer.h.55.ln_mlp.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.55.mlp.dense_4h_to_h.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.55.mlp.dense_h_to_4h.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.55.self_attention.dense.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.55.self_attention.query_key_value.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.56.ln_attn.bias": "pytorch_model-00009-of-00009.bin",
"transformer.h.56.ln_attn.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.56.ln_mlp.bias": "pytorch_model-00009-of-00009.bin",
"transformer.h.56.ln_mlp.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.56.mlp.dense_4h_to_h.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.56.mlp.dense_h_to_4h.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.56.self_attention.dense.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.56.self_attention.query_key_value.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.57.ln_attn.bias": "pytorch_model-00009-of-00009.bin",
"transformer.h.57.ln_attn.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.57.ln_mlp.bias": "pytorch_model-00009-of-00009.bin",
"transformer.h.57.ln_mlp.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.57.mlp.dense_4h_to_h.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.57.mlp.dense_h_to_4h.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.57.self_attention.dense.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.57.self_attention.query_key_value.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.58.ln_attn.bias": "pytorch_model-00009-of-00009.bin",
"transformer.h.58.ln_attn.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.58.ln_mlp.bias": "pytorch_model-00009-of-00009.bin",
"transformer.h.58.ln_mlp.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.58.mlp.dense_4h_to_h.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.58.mlp.dense_h_to_4h.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.58.self_attention.dense.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.58.self_attention.query_key_value.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.59.ln_attn.bias": "pytorch_model-00009-of-00009.bin",
"transformer.h.59.ln_attn.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.59.ln_mlp.bias": "pytorch_model-00009-of-00009.bin",
"transformer.h.59.ln_mlp.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.59.mlp.dense_4h_to_h.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.59.mlp.dense_h_to_4h.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.59.self_attention.dense.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.59.self_attention.query_key_value.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.6.ln_attn.bias": "pytorch_model-00001-of-00009.bin",
"transformer.h.6.ln_attn.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.6.ln_mlp.bias": "pytorch_model-00001-of-00009.bin",
"transformer.h.6.ln_mlp.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.6.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.6.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.6.self_attention.dense.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.6.self_attention.query_key_value.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.7.ln_attn.bias": "pytorch_model-00002-of-00009.bin",
"transformer.h.7.ln_attn.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.7.ln_mlp.bias": "pytorch_model-00002-of-00009.bin",
"transformer.h.7.ln_mlp.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.7.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.7.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.7.self_attention.dense.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.7.self_attention.query_key_value.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.8.ln_attn.bias": "pytorch_model-00002-of-00009.bin",
"transformer.h.8.ln_attn.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.8.ln_mlp.bias": "pytorch_model-00002-of-00009.bin",
"transformer.h.8.ln_mlp.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.8.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.8.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.8.self_attention.dense.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.8.self_attention.query_key_value.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.9.ln_attn.bias": "pytorch_model-00002-of-00009.bin",
"transformer.h.9.ln_attn.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.9.ln_mlp.bias": "pytorch_model-00002-of-00009.bin",
"transformer.h.9.ln_mlp.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.9.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.9.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.9.self_attention.dense.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.9.self_attention.query_key_value.weight": "pytorch_model-00002-of-00009.bin",
"transformer.ln_f.bias": "pytorch_model-00009-of-00009.bin",
"transformer.ln_f.weight": "pytorch_model-00009-of-00009.bin",
"transformer.word_embeddings.weight": "pytorch_model-00001-of-00009.bin"
}
}
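
As a consistency check, `total_size` matches the fp16 byte count implied by config.json. A sketch of the arithmetic, assuming the RW layer shapes listed in the weight map (fused QKV of width (n_head + 2·n_head_kv)·head_dim, a 4x MLP, two LayerNorms per layer, and an untied lm_head):

```
hidden, n_head, n_head_kv, n_layer, vocab = 8192, 128, 8, 60, 65025
head_dim = hidden // n_head
qkv = hidden * (n_head + 2 * n_head_kv) * head_dim   # fused query_key_value
dense = hidden * hidden                               # attention output projection
mlp = 2 * hidden * 4 * hidden                         # dense_h_to_4h + dense_4h_to_h
lns = 4 * hidden                                      # ln_attn + ln_mlp (weight + bias)
per_layer = qkv + dense + mlp + lns

params = n_layer * per_layer + 2 * vocab * hidden + 2 * hidden  # + embeddings, lm_head, ln_f
assert params * 2 == 83_671_973_888   # fp16: two bytes per parameter
```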

17
special_tokens_map.json Normal file

@@ -0,0 +1,17 @@
{
"additional_special_tokens": [
">>TITLE<<",
">>ABSTRACT<<",
">>INTRODUCTION<<",
">>SUMMARY<<",
">>COMMENT<<",
">>ANSWER<<",
">>QUESTION<<",
">>DOMAIN<<",
">>PREFIX<<",
">>SUFFIX<<",
">>MIDDLE<<"
],
"eos_token": "<|endoftext|>",
"pad_token": "[PAD]"
}

129992
tokenizer.json Normal file

File diff suppressed because it is too large

8
tokenizer_config.json Normal file

@@ -0,0 +1,8 @@
{
"add_prefix_space": false,
"clean_up_tokenization_spaces": true,
"eos_token": "<|endoftext|>",
"model_max_length": 2048,
"padding_side": "right",
"tokenizer_class": "PreTrainedTokenizerFast"
}
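
A small sketch, assuming a local checkout of this repo, showing the tokenizer settings above in effect:

```
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(".")  # directory holding tokenizer.json and the configs above
assert tok.eos_token == "<|endoftext|>"
assert tok.pad_token == "[PAD]"
assert tok.model_max_length == 2048
print(tok("What is a falcon?").input_ids)
```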

658
trainer_state.json Normal file

@@ -0,0 +1,658 @@
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.9930151338766007,
"global_step": 214,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"learning_rate": 2e-05,
"loss": 0.8616,
"step": 2
},
{
"epoch": 0.04,
"learning_rate": 1.9995608365087945e-05,
"loss": 0.7122,
"step": 4
},
{
"epoch": 0.06,
"learning_rate": 1.9982437317643218e-05,
"loss": 0.6609,
"step": 6
},
{
"epoch": 0.07,
"learning_rate": 1.996049842615217e-05,
"loss": 0.6289,
"step": 8
},
{
"epoch": 0.09,
"learning_rate": 1.992981096013517e-05,
"loss": 0.6091,
"step": 10
},
{
"epoch": 0.11,
"learning_rate": 1.9890401873221642e-05,
"loss": 0.5863,
"step": 12
},
{
"epoch": 0.13,
"learning_rate": 1.984230577947597e-05,
"loss": 0.5834,
"step": 14
},
{
"epoch": 0.15,
"learning_rate": 1.9785564922995042e-05,
"loss": 0.5628,
"step": 16
},
{
"epoch": 0.17,
"learning_rate": 1.972022914080411e-05,
"loss": 0.5446,
"step": 18
},
{
"epoch": 0.19,
"learning_rate": 1.964635581908359e-05,
"loss": 0.5434,
"step": 20
},
{
"epoch": 0.2,
"learning_rate": 1.9564009842765225e-05,
"loss": 0.5196,
"step": 22
},
{
"epoch": 0.22,
"learning_rate": 1.9473263538541916e-05,
"loss": 0.5292,
"step": 24
},
{
"epoch": 0.24,
"learning_rate": 1.9374196611341212e-05,
"loss": 0.5139,
"step": 26
},
{
"epoch": 0.26,
"learning_rate": 1.9266896074318335e-05,
"loss": 0.5088,
"step": 28
},
{
"epoch": 0.28,
"learning_rate": 1.9151456172430186e-05,
"loss": 0.5079,
"step": 30
},
{
"epoch": 0.3,
"learning_rate": 1.9027978299657436e-05,
"loss": 0.5086,
"step": 32
},
{
"epoch": 0.32,
"learning_rate": 1.8896570909947477e-05,
"loss": 0.5024,
"step": 34
},
{
"epoch": 0.34,
"learning_rate": 1.875734942195637e-05,
"loss": 0.5046,
"step": 36
},
{
"epoch": 0.35,
"learning_rate": 1.8610436117673557e-05,
"loss": 0.4777,
"step": 38
},
{
"epoch": 0.37,
"learning_rate": 1.845596003501826e-05,
"loss": 0.5006,
"step": 40
},
{
"epoch": 0.39,
"learning_rate": 1.829405685450202e-05,
"loss": 0.4796,
"step": 42
},
{
"epoch": 0.41,
"learning_rate": 1.8124868780056814e-05,
"loss": 0.4701,
"step": 44
},
{
"epoch": 0.43,
"learning_rate": 1.7948544414133534e-05,
"loss": 0.4784,
"step": 46
},
{
"epoch": 0.45,
"learning_rate": 1.7765238627180424e-05,
"loss": 0.4773,
"step": 48
},
{
"epoch": 0.47,
"learning_rate": 1.7575112421616203e-05,
"loss": 0.4783,
"step": 50
},
{
"epoch": 0.48,
"learning_rate": 1.7378332790417275e-05,
"loss": 0.4772,
"step": 52
},
{
"epoch": 0.5,
"learning_rate": 1.717507257044331e-05,
"loss": 0.4671,
"step": 54
},
{
"epoch": 0.52,
"learning_rate": 1.6965510290629973e-05,
"loss": 0.4716,
"step": 56
},
{
"epoch": 0.54,
"learning_rate": 1.6749830015182106e-05,
"loss": 0.4539,
"step": 58
},
{
"epoch": 0.56,
"learning_rate": 1.6528221181905217e-05,
"loss": 0.4613,
"step": 60
},
{
"epoch": 0.58,
"learning_rate": 1.6300878435817115e-05,
"loss": 0.4758,
"step": 62
},
{
"epoch": 0.6,
"learning_rate": 1.6068001458185934e-05,
"loss": 0.4623,
"step": 64
},
{
"epoch": 0.61,
"learning_rate": 1.5829794791144723e-05,
"loss": 0.4734,
"step": 66
},
{
"epoch": 0.63,
"learning_rate": 1.5586467658036526e-05,
"loss": 0.4512,
"step": 68
},
{
"epoch": 0.65,
"learning_rate": 1.533823377964791e-05,
"loss": 0.4713,
"step": 70
},
{
"epoch": 0.67,
"learning_rate": 1.5085311186492206e-05,
"loss": 0.4789,
"step": 72
},
{
"epoch": 0.69,
"learning_rate": 1.482792202730745e-05,
"loss": 0.5663,
"step": 74
},
{
"epoch": 0.71,
"learning_rate": 1.4566292373937133e-05,
"loss": 0.4551,
"step": 76
},
{
"epoch": 0.73,
"learning_rate": 1.4300652022765207e-05,
"loss": 0.461,
"step": 78
},
{
"epoch": 0.75,
"learning_rate": 1.4031234292879726e-05,
"loss": 0.4673,
"step": 80
},
{
"epoch": 0.76,
"learning_rate": 1.3758275821142382e-05,
"loss": 0.4589,
"step": 82
},
{
"epoch": 0.78,
"learning_rate": 1.348201635434399e-05,
"loss": 0.4495,
"step": 84
},
{
"epoch": 0.8,
"learning_rate": 1.3202698538628376e-05,
"loss": 0.4645,
"step": 86
},
{
"epoch": 0.82,
"learning_rate": 1.292056770636976e-05,
"loss": 0.4555,
"step": 88
},
{
"epoch": 0.84,
"learning_rate": 1.2635871660690677e-05,
"loss": 0.4464,
"step": 90
},
{
"epoch": 0.86,
"learning_rate": 1.234886045780984e-05,
"loss": 0.4646,
"step": 92
},
{
"epoch": 0.88,
"learning_rate": 1.2059786187410984e-05,
"loss": 0.4599,
"step": 94
},
{
"epoch": 0.89,
"learning_rate": 1.176890275122573e-05,
"loss": 0.4534,
"step": 96
},
{
"epoch": 0.91,
"learning_rate": 1.1476465640024814e-05,
"loss": 0.4744,
"step": 98
},
{
"epoch": 0.93,
"learning_rate": 1.1182731709213658e-05,
"loss": 0.4626,
"step": 100
},
{
"epoch": 0.95,
"learning_rate": 1.0887958953229349e-05,
"loss": 0.4407,
"step": 102
},
{
"epoch": 0.97,
"learning_rate": 1.0592406278937143e-05,
"loss": 0.452,
"step": 104
},
{
"epoch": 0.99,
"learning_rate": 1.0296333278225599e-05,
"loss": 0.4496,
"step": 106
},
{
"epoch": 1.01,
"learning_rate": 1e-05,
"loss": 0.4138,
"step": 108
},
{
"epoch": 1.02,
"learning_rate": 9.703666721774403e-06,
"loss": 0.2924,
"step": 110
},
{
"epoch": 1.04,
"learning_rate": 9.407593721062858e-06,
"loss": 0.3184,
"step": 112
},
{
"epoch": 1.06,
"learning_rate": 9.112041046770653e-06,
"loss": 0.2885,
"step": 114
},
{
"epoch": 1.08,
"learning_rate": 8.817268290786343e-06,
"loss": 0.2842,
"step": 116
},
{
"epoch": 1.1,
"learning_rate": 8.52353435997519e-06,
"loss": 0.2763,
"step": 118
},
{
"epoch": 1.12,
"learning_rate": 8.231097248774273e-06,
"loss": 0.2765,
"step": 120
},
{
"epoch": 1.14,
"learning_rate": 7.940213812589018e-06,
"loss": 0.2788,
"step": 122
},
{
"epoch": 1.15,
"learning_rate": 7.651139542190164e-06,
"loss": 0.2821,
"step": 124
},
{
"epoch": 1.17,
"learning_rate": 7.364128339309326e-06,
"loss": 0.2834,
"step": 126
},
{
"epoch": 1.19,
"learning_rate": 7.079432293630244e-06,
"loss": 0.2728,
"step": 128
},
{
"epoch": 1.21,
"learning_rate": 6.797301461371626e-06,
"loss": 0.274,
"step": 130
},
{
"epoch": 1.23,
"learning_rate": 6.517983645656014e-06,
"loss": 0.276,
"step": 132
},
{
"epoch": 1.25,
"learning_rate": 6.241724178857621e-06,
"loss": 0.2723,
"step": 134
},
{
"epoch": 1.27,
"learning_rate": 5.96876570712028e-06,
"loss": 0.2876,
"step": 136
},
{
"epoch": 1.29,
"learning_rate": 5.699347977234799e-06,
"loss": 0.2714,
"step": 138
},
{
"epoch": 1.3,
"learning_rate": 5.43370762606287e-06,
"loss": 0.2758,
"step": 140
},
{
"epoch": 1.32,
"learning_rate": 5.172077972692553e-06,
"loss": 0.2816,
"step": 142
},
{
"epoch": 1.34,
"learning_rate": 4.914688813507798e-06,
"loss": 0.2787,
"step": 144
},
{
"epoch": 1.36,
"learning_rate": 4.661766220352098e-06,
"loss": 0.2801,
"step": 146
},
{
"epoch": 1.38,
"learning_rate": 4.413532341963477e-06,
"loss": 0.271,
"step": 148
},
{
"epoch": 1.4,
"learning_rate": 4.170205208855281e-06,
"loss": 0.2708,
"step": 150
},
{
"epoch": 1.42,
"learning_rate": 3.931998541814069e-06,
"loss": 0.2728,
"step": 152
},
{
"epoch": 1.43,
"learning_rate": 3.6991215641828903e-06,
"loss": 0.2719,
"step": 154
},
{
"epoch": 1.45,
"learning_rate": 3.4717788180947855e-06,
"loss": 0.272,
"step": 156
},
{
"epoch": 1.47,
"learning_rate": 3.250169984817897e-06,
"loss": 0.2724,
"step": 158
},
{
"epoch": 1.49,
"learning_rate": 3.0344897093700333e-06,
"loss": 0.2744,
"step": 160
},
{
"epoch": 1.51,
"learning_rate": 2.8249274295566863e-06,
"loss": 0.2736,
"step": 162
},
{
"epoch": 1.53,
"learning_rate": 2.6216672095827267e-06,
"loss": 0.2681,
"step": 164
},
{
"epoch": 1.55,
"learning_rate": 2.424887578383799e-06,
"loss": 0.2703,
"step": 166
},
{
"epoch": 1.56,
"learning_rate": 2.234761372819577e-06,
"loss": 0.2649,
"step": 168
},
{
"epoch": 1.58,
"learning_rate": 2.0514555858664663e-06,
"loss": 0.2781,
"step": 170
},
{
"epoch": 1.6,
"learning_rate": 1.875131219943187e-06,
"loss": 0.2645,
"step": 172
},
{
"epoch": 1.62,
"learning_rate": 1.7059431454979825e-06,
"loss": 0.27,
"step": 174
},
{
"epoch": 1.64,
"learning_rate": 1.5440399649817384e-06,
"loss": 0.2683,
"step": 176
},
{
"epoch": 1.66,
"learning_rate": 1.3895638823264447e-06,
"loss": 0.2645,
"step": 178
},
{
"epoch": 1.68,
"learning_rate": 1.2426505780436326e-06,
"loss": 0.2712,
"step": 180
},
{
"epoch": 1.69,
"learning_rate": 1.1034290900525279e-06,
"loss": 0.2621,
"step": 182
},
{
"epoch": 1.71,
"learning_rate": 9.720217003425648e-07,
"loss": 0.268,
"step": 184
},
{
"epoch": 1.73,
"learning_rate": 8.485438275698154e-07,
"loss": 0.2641,
"step": 186
},
{
"epoch": 1.75,
"learning_rate": 7.331039256816664e-07,
"loss": 0.2623,
"step": 188
},
{
"epoch": 1.77,
"learning_rate": 6.258033886587911e-07,
"loss": 0.2714,
"step": 190
},
{
"epoch": 1.79,
"learning_rate": 5.267364614580861e-07,
"loss": 0.2723,
"step": 192
},
{
"epoch": 1.81,
"learning_rate": 4.359901572347758e-07,
"loss": 0.2611,
"step": 194
},
{
"epoch": 1.83,
"learning_rate": 3.5364418091641374e-07,
"loss": 0.2724,
"step": 196
},
{
"epoch": 1.84,
"learning_rate": 2.7977085919589253e-07,
"loss": 0.2738,
"step": 198
},
{
"epoch": 1.86,
"learning_rate": 2.1443507700495968e-07,
"loss": 0.2623,
"step": 200
},
{
"epoch": 1.88,
"learning_rate": 1.5769422052403172e-07,
"loss": 0.2695,
"step": 202
},
{
"epoch": 1.9,
"learning_rate": 1.0959812677835968e-07,
"loss": 0.2661,
"step": 204
},
{
"epoch": 1.92,
"learning_rate": 7.018903986483083e-08,
"loss": 0.2712,
"step": 206
},
{
"epoch": 1.94,
"learning_rate": 3.950157384783104e-08,
"loss": 0.2706,
"step": 208
},
{
"epoch": 1.96,
"learning_rate": 1.7562682356786488e-08,
"loss": 0.276,
"step": 210
},
{
"epoch": 1.97,
"learning_rate": 4.39163491205652e-09,
"loss": 0.2767,
"step": 212
},
{
"epoch": 1.99,
"learning_rate": 0.0,
"loss": 0.2624,
"step": 214
}
],
"max_steps": 214,
"num_train_epochs": 2,
"total_flos": 1536113535614976.0,
"trial_name": null,
"trial_params": null
}
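
The logged learning rates trace a cosine decay from 2e-5 at the first logged step down to 0 at step 214 (loss is logged every 2 steps). A quick check against the entries above; the closed form is fitted to the log itself, not read from training_args.bin:

```
import math

def cosine_lr(step, max_lr=2e-5, first=2, last=214):
    # cosine decay fitted to the log: max_lr at the first logged step, zero at the last
    t = (step - first) / (last - first)
    return max_lr * 0.5 * (1 + math.cos(math.pi * t))

assert abs(cosine_lr(4) - 1.9995608365087945e-05) < 1e-12
assert abs(cosine_lr(108) - 1e-05) < 1e-12
assert cosine_lr(214) == 0.0
```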

3
training_args.bin Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fe7ade02010de890951f8d2a3a60933d8a512b39c7bdca5d9951b162a710a84e
size 5051

584
zero_to_fp32.py Normal file

@@ -0,0 +1,584 @@
#!/usr/bin/env python
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
# This script extracts fp32 consolidated weights from ZeRO-2 and ZeRO-3 DeepSpeed checkpoints. It gets
# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
# application.
#
# example: python zero_to_fp32.py . pytorch_model.bin
import argparse
import torch
import glob
import math
import os
import re
from collections import OrderedDict
from dataclasses import dataclass
# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
# DeepSpeed data structures it has to be available in the current python environment.
from deepspeed.utils import logger
from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
@dataclass
class zero_model_state:
buffers: dict()
param_shapes: dict()
shared_params: list
ds_version: int
frozen_param_shapes: dict()
frozen_param_fragments: dict()
debug = 0
# load to cpu
device = torch.device('cpu')


def atoi(text):
    return int(text) if text.isdigit() else text


def natural_keys(text):
    '''
    alist.sort(key=natural_keys) sorts in human order
    http://nedbatchelder.com/blog/200712/human_sorting.html
    (See Toothy's implementation in the comments)
    '''
    return [atoi(c) for c in re.split(r'(\d+)', text)]
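

# Illustration (comment added by the editor, not in the upstream script):
# natural sorting keeps shard files in numeric order, e.g.
# sorted(["rank_10", "rank_2"], key=natural_keys) -> ["rank_2", "rank_10"],
# whereas a plain lexicographic sort would put "rank_10" first.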


def get_model_state_file(checkpoint_dir, zero_stage):
    if not os.path.isdir(checkpoint_dir):
        raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")

    # there should be only one file
    if zero_stage == 2:
        file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
    elif zero_stage == 3:
        file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")

    if not os.path.exists(file):
        raise FileNotFoundError(f"can't find model states file at '{file}'")

    return file


def get_checkpoint_files(checkpoint_dir, glob_pattern):
    # XXX: need to test that this simple glob rule works for multi-node setup too
    ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)

    if len(ckpt_files) == 0:
        raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")

    return ckpt_files


def get_optim_files(checkpoint_dir):
    return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")


def get_model_state_files(checkpoint_dir):
    return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")


def parse_model_states(files):
    zero_model_states = []
    for file in files:
        state_dict = torch.load(file, map_location=device)

        if BUFFER_NAMES not in state_dict:
            raise ValueError(f"{file} is not a model state checkpoint")
        buffer_names = state_dict[BUFFER_NAMES]
        if debug:
            print("Found buffers:", buffer_names)

        # recover just the buffers while restoring them to fp32 if they were saved in fp16
        buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
        param_shapes = state_dict[PARAM_SHAPES]

        # collect parameters that are included in param_shapes
        param_names = []
        for s in param_shapes:
            for name in s.keys():
                param_names.append(name)

        # update with frozen parameters
        frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
        if frozen_param_shapes is not None:
            if debug:
                print(f"Found frozen_param_shapes: {frozen_param_shapes}")
            param_names += list(frozen_param_shapes.keys())

        # record shared parameters so that they can be recovered based on partners
        # this is because such parameters holding reference only are not saved by optimizer
        shared_params = []
        for param in state_dict["module"]:
            if param not in [*param_names, *buffer_names]:
                for share_param in state_dict["module"]:
                    if (state_dict["module"][share_param].data_ptr() == state_dict["module"][param].data_ptr()
                            and share_param != param):
                        shared_params.append([param, share_param])
                        break

        ds_version = state_dict.get(DS_VERSION, None)

        frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)

        z_model_state = zero_model_state(buffers=buffers,
                                         param_shapes=param_shapes,
                                         shared_params=shared_params,
                                         ds_version=ds_version,
                                         frozen_param_shapes=frozen_param_shapes,
                                         frozen_param_fragments=frozen_param_fragments)
        zero_model_states.append(z_model_state)

    return zero_model_states


def parse_optim_states(files, ds_checkpoint_dir):
    total_files = len(files)
    state_dicts = []
    for f in files:
        state_dicts.append(torch.load(f, map_location=device))

    if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
        raise ValueError(f"{files[0]} is not a zero checkpoint")
    zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
    world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]

    # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
    # parameters can be different from data parallelism for non-expert parameters. So we can just
    # use the max of the partition_count to get the dp world_size.

    if type(world_size) is list:
        world_size = max(world_size)

    if world_size != total_files:
        raise ValueError(
            f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
            "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
        )

    # the groups are named differently in each stage
    if zero_stage == 2:
        fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
    elif zero_stage == 3:
        fp32_groups_key = FP32_FLAT_GROUPS
    else:
        raise ValueError(f"unknown zero stage {zero_stage}")

    if zero_stage == 2:
        fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
    elif zero_stage == 3:
        # if there is more than one param group, there will be multiple flattened tensors - one
        # flattened tensor per group - for simplicity merge them into a single tensor
        #
        # XXX: could make the script more memory efficient for when there are multiple groups - it
        # will require matching the sub-lists of param_shapes for each param group flattened tensor
        fp32_flat_groups = [
            torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
        ]

    return zero_stage, world_size, fp32_flat_groups


def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir):
    """
    Returns fp32 state_dict reconstructed from ds checkpoint

    Args:
        - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)

    """
    print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")

    optim_files = get_optim_files(ds_checkpoint_dir)
    zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
    print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")

    model_files = get_model_state_files(ds_checkpoint_dir)

    zero_model_states = parse_model_states(model_files)
    print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')

    if zero_stage == 2:
        return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states)
    elif zero_stage == 3:
        return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states)


def _zero2_merge_frozen_params(state_dict, zero_model_states):
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    frozen_param_fragments = zero_model_states[0].frozen_param_fragments

    if debug:
        num_elem = sum(s.numel() for s in frozen_param_shapes.values())
        print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

        wanted_params = len(frozen_param_shapes)
        wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
        avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
        print(f'Frozen params: Have {avail_numel} numels to process.')
        print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        state_dict[name] = frozen_param_fragments[name]

        if debug:
            print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")


def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    param_shapes = zero_model_states[0].param_shapes

    # Reconstruction protocol:
    #
    # XXX: document this

    if debug:
        for i in range(world_size):
            for j in range(len(fp32_flat_groups[0])):
                print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")

    # XXX: memory usage doubles here (zero2)
    num_param_groups = len(fp32_flat_groups[0])
    merged_single_partition_of_fp32_groups = []
    for i in range(num_param_groups):
        merged_partitions = [sd[i] for sd in fp32_flat_groups]
        full_single_fp32_vector = torch.cat(merged_partitions, 0)
        merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
    avail_numel = sum(
        [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])

    if debug:
        wanted_params = sum([len(shapes) for shapes in param_shapes])
        wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
        # not asserting if there is a mismatch due to possible padding
        print(f"Have {avail_numel} numels to process.")
        print(f"Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    total_numel = 0
    total_params = 0
    for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
        offset = 0
        avail_numel = full_single_fp32_vector.numel()
        for name, shape in shapes.items():
            unpartitioned_numel = shape.numel()
            total_numel += unpartitioned_numel
            total_params += 1

            if debug:
                print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
            state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
            offset += unpartitioned_numel

        # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
        # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
        # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
        # live optimizer object, so we are checking that the numbers are within the right range
        align_to = 2 * world_size

        def zero2_align(x):
            return align_to * math.ceil(x / align_to)

        if debug:
            print(f"original offset={offset}, avail_numel={avail_numel}")

        offset = zero2_align(offset)
        avail_numel = zero2_align(avail_numel)

        if debug:
            print(f"aligned offset={offset}, avail_numel={avail_numel}")

        # Sanity check
        if offset != avail_numel:
            raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")


def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states):
    state_dict = OrderedDict()

    # buffers
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    _zero2_merge_frozen_params(state_dict, zero_model_states)

    _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters
    for pair in zero_model_states[0].shared_params:
        state_dict[pair[0]] = state_dict[pair[1]]

    return state_dict


def zero3_partitioned_param_info(unpartitioned_numel, world_size):
    remainder = unpartitioned_numel % world_size
    padding_numel = (world_size - remainder) if remainder else 0
    partitioned_numel = math.ceil(unpartitioned_numel / world_size)
    return partitioned_numel, padding_numel
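

# Illustration (comment added by the editor, not in the upstream script):
# with unpartitioned_numel=10 and world_size=4, each rank holds ceil(10/4)=3
# elements and the last partition carries 2 elements of padding, so the call
# returns (3, 2); 3 * 4 ranks = 12 = 10 real elements + 2 padding.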


def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    if debug:
        for i in range(world_size):
            num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
            print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

        frozen_param_shapes = zero_model_states[0].frozen_param_shapes
        wanted_params = len(frozen_param_shapes)
        wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
        avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
        print(f'Frozen params: Have {avail_numel} numels to process.')
        print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in zero_model_states[0].frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
        state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")


def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    param_shapes = zero_model_states[0].param_shapes
    avail_numel = fp32_flat_groups[0].numel() * world_size
    # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
    # param, re-consolidating each param, while dealing with padding if any

    # merge list of dicts, preserving order
    param_shapes = {k: v for d in param_shapes for k, v in d.items()}

    if debug:
        for i in range(world_size):
            print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")

        wanted_params = len(param_shapes)
        wanted_numel = sum(shape.numel() for shape in param_shapes.values())
        # not asserting if there is a mismatch due to possible padding
        avail_numel = fp32_flat_groups[0].numel() * world_size
        print(f"Trainable params: Have {avail_numel} numels to process.")
        print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    offset = 0
    total_numel = 0
    total_params = 0
    for name, shape in param_shapes.items():
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel
        total_params += 1

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

        # XXX: memory usage doubles here
        state_dict[name] = torch.cat(
            tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
            0).narrow(0, 0, unpartitioned_numel).view(shape)
        offset += partitioned_numel

    offset *= world_size

    # Sanity check
    if offset != avail_numel:
        raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")


def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states):
    state_dict = OrderedDict()

    # buffers
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)

    _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters
    for pair in zero_model_states[0].shared_params:
        state_dict[pair[0]] = state_dict[pair[1]]

    return state_dict


def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
    ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
    via a model hub.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``

    Returns:
        - pytorch ``state_dict``

    Note: this approach may not work if your application doesn't have sufficient free CPU memory and
    you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
    the checkpoint.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
        # do the training and checkpoint saving
        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
        model = model.cpu() # move to cpu
        model.load_state_dict(state_dict)
        # submit to model hub or save the model to share with others

    In this example the ``model`` will no longer be usable in the deepspeed context of the same
    application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.

    """
    if tag is None:
        latest_path = os.path.join(checkpoint_dir, 'latest')
        if os.path.isfile(latest_path):
            with open(latest_path, 'r') as fd:
                tag = fd.read().strip()
        else:
            raise ValueError(f"Unable to find 'latest' file at {latest_path}")

    ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)

    if not os.path.isdir(ds_checkpoint_dir):
        raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")

    return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir)


def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
    loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``

    """
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
    print(f"Saving fp32 state dict to {output_file}")
    torch.save(state_dict, output_file)


def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
    """
    1. Put the provided model to cpu
    2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
    3. Load it into the provided model

    Args:
        - ``model``: the model object to update
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``

    Returns:
        - ``model``: modified model

    Make sure you have plenty of CPU memory available before you call this function. If you don't
    have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
    conveniently placed for you in the checkpoint folder.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
        model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
        # submit to model hub or save the model to share with others

    Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context
    of the same application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    """
    logger.info(f"Extracting fp32 weights")
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)

    logger.info(f"Overwriting model with fp32 weights")
    model = model.cpu()
    model.load_state_dict(state_dict, strict=False)

    return model
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("checkpoint_dir",
type=str,
help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
parser.add_argument(
"output_file",
type=str,
help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
args = parser.parse_args()
debug = args.debug
convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file)
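
Putting it together for this repo, a sketch of the offline conversion using the functions above (the checkpoint folder path is an assumption: it depends on how the DeepSpeed run saved its shards and is not part of this commit; the folder must contain a `latest` file, or a `tag` such as `global_step214` must be passed explicitly):

```python
# Equivalent to: python zero_to_fp32.py path/to/checkpoint pytorch_model_fp32.bin
from zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict

# "path/to/checkpoint" is hypothetical; it must hold a 'latest' file naming the
# tag folder that contains the *_optim_states.pt and *_model_states.pt shards.
convert_zero_checkpoint_to_fp32_state_dict("path/to/checkpoint", "pytorch_model_fp32.bin")
```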