Initialize the project; model provided by the ModelHub XC community
Model: xu1998hz/instructscore_caption
Source: Original Platform
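A minimal sketch of loading this checkpoint with transformers, based on the config.json and tokenizer files committed below; the local path is a placeholder and the snippet is untested:

from transformers import LlamaForCausalLM, LlamaTokenizer

# assumes the repo has been cloned to ./instructscore_caption (placeholder path)
tokenizer = LlamaTokenizer.from_pretrained("./instructscore_caption")
model = LlamaForCausalLM.from_pretrained("./instructscore_caption")  # architecture per config.json
print(model.config.vocab_size)  # 32001: base LLaMA vocab plus the added [PAD] token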
35  .gitattributes  vendored  Normal file
@@ -0,0 +1,35 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
3  added_tokens.json  Normal file
@@ -0,0 +1,3 @@
{
  "[PAD]": 32000
}
24  config.json  Normal file
@@ -0,0 +1,24 @@
{
  "_name_or_path": "decapoda-research/llama-7b-hf",
  "architectures": [
    "LlamaForCausalLM"
  ],
  "bos_token_id": 0,
  "eos_token_id": 1,
  "hidden_act": "silu",
  "hidden_size": 4096,
  "initializer_range": 0.02,
  "intermediate_size": 11008,
  "max_position_embeddings": 2048,
  "max_sequence_length": 2048,
  "model_type": "llama",
  "num_attention_heads": 32,
  "num_hidden_layers": 32,
  "pad_token_id": -1,
  "rms_norm_eps": 1e-06,
  "tie_word_embeddings": false,
  "torch_dtype": "float16",
  "transformers_version": "4.29.2",
  "use_cache": true,
  "vocab_size": 32001
}
7  generation_config.json  Normal file
@@ -0,0 +1,7 @@
{
  "_from_model_config": true,
  "bos_token_id": 0,
  "eos_token_id": 1,
  "pad_token_id": 0,
  "transformers_version": "4.29.2"
}
1  id_rsa.pub  Normal file
@@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDVKQ2ytnBTZdaIptWwmQDQIEXNyMPQBYNGC6k0OclGETZwVXy7SsQ5DSGudPqjqadbyjcP7Sxc8EykTD9qafi76CVBKljClOlueJkL0aqAFfGamn1kyJ2qAjw7+WUiehYYCRMdKW9q1by89wqMqY5CE/b900ietdr6JYPMBRPoS/yt63HrwgXVdGDk7BFQ6V2C+pRfdj6OLsM5/8ARmFNBSvQmX96ToGvK3xSILIqu3lzV92HGsdealUnBdMwXtX2vG9Ituw9rcYwPyhGqa/RlrsYVEp4B7Q+5858ZD0Pwe3H0sWxQ2uBGZWa6KeE3VXs2NxwLzdjB89VdZ9dPXJFT+VCjuvui5cy6r1+sgKJZaGnYUDOA5D1ipjjh7g/oWeGj+8rqVscrALdDOggwLA1ejtS5hy1ZvRMrAI1M/ueWoPm0HRc64LAZoUjjWhEoBsjfLXnva/G/ai6DmR/2qkePvoJipulrWjgBv7q0lEAb4WnjOdX9XNc6ZQiY1hzfcmM= wenda@wenda-Inspiron-3593
7  length_check.py  Normal file
@@ -0,0 +1,7 @@
from datasets import load_dataset
from transformers import AutoTokenizer

data_cnn = load_dataset("cnn_dailymail", '3.0.0')
data_xsum = load_dataset("xsum")

print(data_xsum)
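length_check.py stops at printing the dataset layout; a hedged sketch of the presumably intended length check follows, as a self-contained variant. The local tokenizer path and the 100-example sample are assumptions, not part of the commit:

from datasets import load_dataset
from transformers import AutoTokenizer

# assumes this repo's tokenizer files are in the current directory
tokenizer = AutoTokenizer.from_pretrained(".")
data_xsum = load_dataset("xsum")
# token-length statistics over a small sample of reference summaries
lengths = [len(tokenizer(s).input_ids) for s in data_xsum["validation"]["summary"][:100]]
print("max:", max(lengths), "mean:", sum(lengths) / len(lengths))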
3  pytorch_model.bin  Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:446726817ff6e57a8ecd26703077946f1f09ebc6a56993df42a08fcb1492657c
size 13476958625
3  rng_state_0.pth  Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4c1d4aab692d41c18c919a673e506a5264ad9de9ff132b0de5dab52510157c75
size 21687
3  rng_state_1.pth  Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:72575f2b4522908157bba855533d15354ced6598ecdc09ca8ce5417fa99c9b92
size 21687
3  rng_state_2.pth  Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:eabb43b1fac99cd8e34e58482a942b31a1855cd53120560e1428e8a2aae185e9
size 21687
3  rng_state_3.pth  Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cfd9568c2d39f428f86eb5a99beb6f19ac57f3f024ec4494b8cadd107d148298
size 21687
3  rng_state_4.pth  Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:21ab6cb42d703108752ec781af0a5227b721321c411bb7f7bca00b6ec4e15324
size 21687
3  rng_state_5.pth  Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c9fe828193c13a4a1adccaf24735d85428cb925bfa6204603f3a0a4a34c8cec9
size 21687
3  rng_state_6.pth  Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:36e15c4dcb706b97a74d0f8259d6466dd2e46e60fd71dcb2c6d99e1e9ff2f204
size 21687
3  rng_state_7.pth  Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7a467ddda52bc75625e56adc924121f89adb1e65a5733eec05f6284586a9a926
size 21687
6  special_tokens_map.json  Normal file
@@ -0,0 +1,6 @@
{
  "bos_token": "</s>",
  "eos_token": "</s>",
  "pad_token": "[PAD]",
  "unk_token": "</s>"
}
1  tion_ssh_key.pub  Normal file
@@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDVgp/wj0CXuAA2dP/swXwcu5e0snmQETPF/HIXKNteUZ46+wOSpvwpuk/D2hrl86bj3GOdiGLtZC51nUWnOxrHv7a3MUbFlbKYpKKv2LtEQuSoY5nhSVWIJZnhio12tStvuqr6eat4RZqHm5jZ+vHNfb8GMiPxi5jM59k7nbZm4Fc6XaqJXeOxRsMCTyDI0T7RR2FA0T+CJTyQsDeGXAxclq2P9cXUy9vKfrC68bI87k5ZzXbNDT9HcWKLoTWYHxTHvRwIQh7GKaZiXQ3Q6cph9Jcn7Xr3sbWA8adTzCCVxcfO7X50tBBO1SXNBZvTHEfKYuXSvqCIw/034+0QQ1WEc9rse8y/7KKoOyenf1rcQ/RadmsxWHu0oGgOQIs6gyXmzS38cE31qnHzNducK+yw7XBVFUrUWTqAdaXsFaTL39nbXGSYgaCfQKWOeJXl2cM0F6PG3eWAEiZvpgMS9+Yl+DZzqksCR/uQGLOX/A5YtPpSn5W1HDl7FLly9b5xWrc= wenda@wenda-Inspiron-3593
3  tokenizer.model  Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
size 499723
34  tokenizer_config.json  Normal file
@@ -0,0 +1,34 @@
{
  "add_bos_token": true,
  "add_eos_token": false,
  "bos_token": {
    "__type": "AddedToken",
    "content": "",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "clean_up_tokenization_spaces": false,
  "eos_token": {
    "__type": "AddedToken",
    "content": "",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "model_max_length": 720,
  "pad_token": null,
  "padding_side": "right",
  "sp_model_kwargs": {},
  "tokenizer_class": "LlamaTokenizer",
  "unk_token": {
    "__type": "AddedToken",
    "content": "",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  }
}
2788  trainer_state.json  Normal file
File diff suppressed because it is too large
3  training_args.bin  Normal file
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ee7b5cbd6e2c82fff914ea83dcdc84ac059d8e2a06be53e1109b69f320111393
size 5371
578  zero_to_fp32.py  Executable file
@@ -0,0 +1,578 @@
#!/usr/bin/env python

# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

# This script extracts fp32 consolidated weights from ZeRO stage 2 and stage 3 DeepSpeed checkpoints. It gets
# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
# application.
#
# example: python zero_to_fp32.py . pytorch_model.bin

import argparse
import torch
import glob
import math
import os
import re
from collections import OrderedDict
from dataclasses import dataclass

# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
# DeepSpeed data structures it has to be available in the current python environment.
from deepspeed.utils import logger
from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
                                            FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
                                            FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
@dataclass
class zero_model_state:
    buffers: dict()
    param_shapes: dict()
    shared_params: list
    ds_version: int
    frozen_param_shapes: dict()
    frozen_param_fragments: dict()


debug = 0

# load to cpu
device = torch.device('cpu')


def atoi(text):
    return int(text) if text.isdigit() else text


def natural_keys(text):
    '''
    alist.sort(key=natural_keys) sorts in human order
    http://nedbatchelder.com/blog/200712/human_sorting.html
    (See Toothy's implementation in the comments)
    '''
    return [atoi(c) for c in re.split(r'(\d+)', text)]


def get_model_state_file(checkpoint_dir, zero_stage):
    if not os.path.isdir(checkpoint_dir):
        raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")

    # there should be only one file
    if zero_stage == 2:
        file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
    elif zero_stage == 3:
        file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")

    if not os.path.exists(file):
        raise FileNotFoundError(f"can't find model states file at '{file}'")

    return file


def get_checkpoint_files(checkpoint_dir, glob_pattern):
    # XXX: need to test that this simple glob rule works for multi-node setup too
    ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)

    if len(ckpt_files) == 0:
        raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")

    return ckpt_files


def get_optim_files(checkpoint_dir):
    return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")


def get_model_state_files(checkpoint_dir):
    return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")


def parse_model_states(files):
    zero_model_states = []
    for file in files:
        state_dict = torch.load(file, map_location=device)

        if BUFFER_NAMES not in state_dict:
            raise ValueError(f"{file} is not a model state checkpoint")
        buffer_names = state_dict[BUFFER_NAMES]
        if debug:
            print("Found buffers:", buffer_names)

        # recover just the buffers while restoring them to fp32 if they were saved in fp16
        buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
        param_shapes = state_dict[PARAM_SHAPES]

        # collect parameters that are included in param_shapes
        param_names = []
        for s in param_shapes:
            for name in s.keys():
                param_names.append(name)

        # update with frozen parameters
        frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
        if frozen_param_shapes is not None:
            if debug:
                print(f"Found frozen_param_shapes: {frozen_param_shapes}")
            param_names += list(frozen_param_shapes.keys())

        # handle shared params
        shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]

        ds_version = state_dict.get(DS_VERSION, None)

        frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)

        z_model_state = zero_model_state(buffers=buffers,
                                         param_shapes=param_shapes,
                                         shared_params=shared_params,
                                         ds_version=ds_version,
                                         frozen_param_shapes=frozen_param_shapes,
                                         frozen_param_fragments=frozen_param_fragments)
        zero_model_states.append(z_model_state)

    return zero_model_states


def parse_optim_states(files, ds_checkpoint_dir):

    total_files = len(files)
    state_dicts = []
    for f in files:
        state_dicts.append(torch.load(f, map_location=device))

    if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]:
        raise ValueError(f"{files[0]} is not a zero checkpoint")
    zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
    world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]

    # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
    # parameters can be different from data parallelism for non-expert parameters. So we can just
    # use the max of the partition_count to get the dp world_size.

    if type(world_size) is list:
        world_size = max(world_size)

    if world_size != total_files:
        raise ValueError(
            f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
            "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
        )

    # the groups are named differently in each stage
    if zero_stage == 2:
        fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
    elif zero_stage == 3:
        fp32_groups_key = FP32_FLAT_GROUPS
    else:
        raise ValueError(f"unknown zero stage {zero_stage}")

    if zero_stage == 2:
        fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
    elif zero_stage == 3:
        # if there is more than one param group, there will be multiple flattened tensors - one
        # flattened tensor per group - for simplicity merge them into a single tensor
        #
        # XXX: could make the script more memory efficient for when there are multiple groups - it
        # will require matching the sub-lists of param_shapes for each param group flattened tensor

        fp32_flat_groups = [
            torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
        ]

    return zero_stage, world_size, fp32_flat_groups
def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir):
    """
    Returns fp32 state_dict reconstructed from ds checkpoint

    Args:
        - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)

    """
    print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")

    optim_files = get_optim_files(ds_checkpoint_dir)
    zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
    print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")

    model_files = get_model_state_files(ds_checkpoint_dir)

    zero_model_states = parse_model_states(model_files)
    print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')

    if zero_stage == 2:
        return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states)
    elif zero_stage == 3:
        return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states)


def _zero2_merge_frozen_params(state_dict, zero_model_states):
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    frozen_param_fragments = zero_model_states[0].frozen_param_fragments

    if debug:
        num_elem = sum(s.numel() for s in frozen_param_shapes.values())
        print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        state_dict[name] = frozen_param_fragments[name]

        if debug:
            print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")


def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    param_shapes = zero_model_states[0].param_shapes

    # Reconstruction protocol:
    #
    # XXX: document this

    if debug:
        for i in range(world_size):
            for j in range(len(fp32_flat_groups[0])):
                print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")

    # XXX: memory usage doubles here (zero2)
    num_param_groups = len(fp32_flat_groups[0])
    merged_single_partition_of_fp32_groups = []
    for i in range(num_param_groups):
        merged_partitions = [sd[i] for sd in fp32_flat_groups]
        full_single_fp32_vector = torch.cat(merged_partitions, 0)
        merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
    avail_numel = sum(
        [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])

    if debug:
        wanted_params = sum([len(shapes) for shapes in param_shapes])
        wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
        # not asserting if there is a mismatch due to possible padding
        print(f"Have {avail_numel} numels to process.")
        print(f"Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    total_numel = 0
    total_params = 0
    for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
        offset = 0
        avail_numel = full_single_fp32_vector.numel()
        for name, shape in shapes.items():

            unpartitioned_numel = shape.numel()
            total_numel += unpartitioned_numel
            total_params += 1

            if debug:
                print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
            state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
            offset += unpartitioned_numel

        # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
        # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
        # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
        # live optimizer object, so we are checking that the numbers are within the right range
        align_to = 2 * world_size

        def zero2_align(x):
            return align_to * math.ceil(x / align_to)

        if debug:
            print(f"original offset={offset}, avail_numel={avail_numel}")

        offset = zero2_align(offset)
        avail_numel = zero2_align(avail_numel)

        if debug:
            print(f"aligned offset={offset}, avail_numel={avail_numel}")

        # Sanity check
        if offset != avail_numel:
            raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")


def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states):
    state_dict = OrderedDict()

    # buffers
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    _zero2_merge_frozen_params(state_dict, zero_model_states)

    _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters
    for pair in zero_model_states[0].shared_params:
        if pair[1] in state_dict:
            state_dict[pair[0]] = state_dict[pair[1]]

    return state_dict


def zero3_partitioned_param_info(unpartitioned_numel, world_size):
    remainder = unpartitioned_numel % world_size
    padding_numel = (world_size - remainder) if remainder else 0
    partitioned_numel = math.ceil(unpartitioned_numel / world_size)
    return partitioned_numel, padding_numel
def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    if debug:
        for i in range(world_size):
            num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
            print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in zero_model_states[0].frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
        state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")


def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    param_shapes = zero_model_states[0].param_shapes
    avail_numel = fp32_flat_groups[0].numel() * world_size
    # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
    # param, re-consolidating each param, while dealing with padding if any

    # merge list of dicts, preserving order
    param_shapes = {k: v for d in param_shapes for k, v in d.items()}

    if debug:
        for i in range(world_size):
            print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")

    wanted_params = len(param_shapes)
    wanted_numel = sum(shape.numel() for shape in param_shapes.values())
    # not asserting if there is a mismatch due to possible padding
    avail_numel = fp32_flat_groups[0].numel() * world_size
    print(f"Trainable params: Have {avail_numel} numels to process.")
    print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    offset = 0
    total_numel = 0
    total_params = 0
    for name, shape in param_shapes.items():

        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel
        total_params += 1

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

        # XXX: memory usage doubles here
        state_dict[name] = torch.cat(
            tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
            0).narrow(0, 0, unpartitioned_numel).view(shape)
        offset += partitioned_numel

    offset *= world_size

    # Sanity check
    if offset != avail_numel:
        raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")


def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states):
    state_dict = OrderedDict()

    # buffers
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)

    _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters
    for pair in zero_model_states[0].shared_params:
        if pair[1] in state_dict:
            state_dict[pair[0]] = state_dict[pair[1]]

    return state_dict
def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
    ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
    via a model hub.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``

    Returns:
        - pytorch ``state_dict``

    Note: this approach may not work if your application doesn't have sufficient free CPU memory and
    you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
    the checkpoint.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
        # do the training and checkpoint saving
        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
        model = model.cpu() # move to cpu
        model.load_state_dict(state_dict)
        # submit to model hub or save the model to share with others

    In this example the ``model`` will no longer be usable in the deepspeed context of the same
    application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.

    """
    if tag is None:
        latest_path = os.path.join(checkpoint_dir, 'latest')
        if os.path.isfile(latest_path):
            with open(latest_path, 'r') as fd:
                tag = fd.read().strip()
        else:
            raise ValueError(f"Unable to find 'latest' file at {latest_path}")

    ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)

    if not os.path.isdir(ds_checkpoint_dir):
        raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")

    return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir)


def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
    loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
    """

    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
    print(f"Saving fp32 state dict to {output_file}")
    torch.save(state_dict, output_file)


def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
    """
    1. Put the provided model to cpu
    2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
    3. Load it into the provided model

    Args:
        - ``model``: the model object to update
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``

    Returns:
        - ``model``: modified model

    Make sure you have plenty of CPU memory available before you call this function. If you don't
    have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
    conveniently placed for you in the checkpoint folder.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
        model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
        # submit to model hub or save the model to share with others

    Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context
    of the same application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    """
    logger.info(f"Extracting fp32 weights")
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)

    logger.info(f"Overwriting model with fp32 weights")
    model = model.cpu()
    model.load_state_dict(state_dict, strict=False)

    return model


if __name__ == "__main__":

    parser = argparse.ArgumentParser()
    parser.add_argument("checkpoint_dir",
                        type=str,
                        help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
    parser.add_argument(
        "output_file",
        type=str,
        help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
    parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
    args = parser.parse_args()

    debug = args.debug

    convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file)
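Besides the command-line usage shown in the script's header, the conversion can be driven programmatically through the script's own entry point; a short sketch, with placeholder paths:

from zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict

# consolidate the ZeRO checkpoint under path/checkpoint-12 into a single fp32 file
convert_zero_checkpoint_to_fp32_state_dict("path/checkpoint-12", "path/checkpoint-12/pytorch_model.bin")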