Initialize project; model provided by the ModelHub XC community

Model: cjiao/OpenThoughts3-greedy-groups-top-openthinker3-1.5B-checkpoint-375-80
Source: Original Platform
commit 58f6ef2daa
Author: ModelHub XC
Date: 2026-04-22 11:30:45 +08:00
93 changed files with 765382 additions and 0 deletions


@@ -0,0 +1,24 @@
{
"</tool_call>": 151658,
"<tool_call>": 151657,
"<|box_end|>": 151649,
"<|box_start|>": 151648,
"<|endoftext|>": 151643,
"<|file_sep|>": 151664,
"<|fim_middle|>": 151660,
"<|fim_pad|>": 151662,
"<|fim_prefix|>": 151659,
"<|fim_suffix|>": 151661,
"<|im_end|>": 151645,
"<|im_start|>": 151644,
"<|image_pad|>": 151655,
"<|object_ref_end|>": 151647,
"<|object_ref_start|>": 151646,
"<|quad_end|>": 151651,
"<|quad_start|>": 151650,
"<|repo_name|>": 151663,
"<|video_pad|>": 151656,
"<|vision_end|>": 151653,
"<|vision_pad|>": 151654,
"<|vision_start|>": 151652
}
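
The map above pins each added special token to a fixed vocabulary ID. As a sanity check, the IDs can be round-tripped through the shipped tokenizer; a minimal sketch, assuming the files in this commit have been downloaded locally (the path ./checkpoint-120 is hypothetical):

from transformers import AutoTokenizer

# Hypothetical local path to the files in this commit.
tok = AutoTokenizer.from_pretrained("./checkpoint-120")

# Each entry in added_tokens.json should round-trip to its listed ID.
assert tok.convert_tokens_to_ids("<|endoftext|>") == 151643
assert tok.convert_tokens_to_ids("<|im_end|>") == 151645
assert tok.convert_tokens_to_ids("<tool_call>") == 151657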


@@ -0,0 +1,29 @@
{
"_name_or_path": "cjiao/OpenThinker3-1.5B-checkpoint-375",
"architectures": [
"Qwen2ForCausalLM"
],
"attention_dropout": 0.0,
"bos_token_id": 151643,
"eos_token_id": 151645,
"hidden_act": "silu",
"hidden_size": 1536,
"initializer_range": 0.02,
"intermediate_size": 8960,
"max_position_embeddings": 32768,
"max_window_layers": 21,
"model_type": "qwen2",
"num_attention_heads": 12,
"num_hidden_layers": 28,
"num_key_value_heads": 2,
"rms_norm_eps": 1e-06,
"rope_scaling": null,
"rope_theta": 1000000.0,
"sliding_window": null,
"tie_word_embeddings": true,
"torch_dtype": "bfloat16",
"transformers_version": "4.46.1",
"use_cache": false,
"use_sliding_window": false,
"vocab_size": 151936
}
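
The config describes a Qwen2-architecture causal LM: 28 hidden layers, hidden size 1536, 12 attention heads with 2 KV heads (grouped-query attention), a 151936-token vocabulary, and bfloat16 weights. A minimal loading sketch, assuming transformers >= 4.46 is installed and a hypothetical local path:

import torch
from transformers import AutoModelForCausalLM

# torch_dtype matches the "torch_dtype": "bfloat16" field in config.json.
model = AutoModelForCausalLM.from_pretrained(
    "./checkpoint-120",  # hypothetical local path to this commit's files
    torch_dtype=torch.bfloat16,
)
print(model.config.num_hidden_layers)  # 28, per config.json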


@@ -0,0 +1,14 @@
{
"bos_token_id": 151643,
"do_sample": true,
"eos_token_id": [
151645,
151643
],
"pad_token_id": 151643,
"repetition_penalty": 1.1,
"temperature": 0.7,
"top_k": 20,
"top_p": 0.8,
"transformers_version": "4.46.1"
}
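
generation_config.json is picked up automatically by model.generate(), but the same sampling settings can also be passed explicitly. A sketch mirroring the file above (local path hypothetical, as before):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("./checkpoint-120")  # hypothetical path
model = AutoModelForCausalLM.from_pretrained("./checkpoint-120", torch_dtype=torch.bfloat16)

inputs = tok("What is 7 * 6?", return_tensors="pt")
# Explicitly mirrors the sampling parameters in generation_config.json.
out = model.generate(
    **inputs,
    do_sample=True,
    temperature=0.7,
    top_k=20,
    top_p=0.8,
    repetition_penalty=1.1,
    max_new_tokens=64,
)
print(tok.decode(out[0], skip_special_tokens=True))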


@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9571b0d7fc059edc0113d63802404da05eb0383cd1cd92d3203424030ff2cb02
size 9262302412


@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d93f3e7204c2ec464d67df16e3d9eaa32c34e854b20d241c03a6f863abbaa396
size 9262305996


@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e9d8d9c065d6a0c1de1a5464369d2d09f90e786d014f683b00b6026b7384ade4
size 3087520696
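
The three entries above are Git LFS v1 pointer files standing in for the model weight shards: each is three key/value lines giving the spec version, the SHA-256 object ID, and the payload size in bytes. A small parser sketch (the shard file names are not shown in this view, so the path below is hypothetical):

def parse_lfs_pointer(path: str) -> dict:
    """Parse a Git LFS v1 pointer file into a {key: value} dict."""
    with open(path) as fh:
        return dict(line.strip().split(" ", 1) for line in fh if line.strip())

# Hypothetical shard name; this view does not show the actual file names.
ptr = parse_lfs_pointer("model-00001-of-00003.safetensors")
print(ptr["oid"])        # e.g. "sha256:9571b0d7..."
print(int(ptr["size"]))  # e.g. 9262302412 bytes (~8.6 GiB)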

checkpoint-120/latest Normal file (1 line)

@@ -0,0 +1 @@
global_step120

checkpoint-120/merges.txt Normal file (151388 lines)

File diff suppressed because it is too large


@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:678d71bc8099d93755401492f76911ff275389f2a5513b74c759e610e9eafac3
size 3554214752


@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c8d6a959372d5e0c2ea025dd26c9d0ad2046fce19352056cae8074dcbd0a6fd4
size 14512


@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0f68a37892a1b445d21bb35cc10bf7a058a6f9ec8c363f5ed156ff4f49d90fb6
size 14512


@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:31e65a2851aba485e25b7e4ab2f62ddde29f561c454dcfac45945c44c5cf7423
size 1064


@@ -0,0 +1,31 @@
{
"additional_special_tokens": [
"<|im_start|>",
"<|im_end|>",
"<|object_ref_start|>",
"<|object_ref_end|>",
"<|box_start|>",
"<|box_end|>",
"<|quad_start|>",
"<|quad_end|>",
"<|vision_start|>",
"<|vision_end|>",
"<|vision_pad|>",
"<|image_pad|>",
"<|video_pad|>"
],
"eos_token": {
"content": "<|endoftext|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
},
"pad_token": {
"content": "<|endoftext|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
}
}
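
One detail worth noting: the tokenizer's declared eos_token here is <|endoftext|> (151643), while config.json sets eos_token_id to <|im_end|> (151645) and generation_config.json lists both IDs as stop tokens. A quick inspection sketch (hypothetical local path):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./checkpoint-120")  # hypothetical path
print(tok.eos_token, tok.eos_token_id)  # "<|endoftext|>", 151643
print(tok.pad_token, tok.pad_token_id)  # "<|endoftext|>", 151643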


@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9c5ae00e602b8860cbd784ba82a8aa14e8feecec692e7076590d014d7b7fdafa
size 11421896


@@ -0,0 +1,208 @@
{
"add_bos_token": false,
"add_prefix_space": false,
"added_tokens_decoder": {
"151643": {
"content": "<|endoftext|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151644": {
"content": "<|im_start|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151645": {
"content": "<|im_end|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151646": {
"content": "<|object_ref_start|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151647": {
"content": "<|object_ref_end|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151648": {
"content": "<|box_start|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151649": {
"content": "<|box_end|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151650": {
"content": "<|quad_start|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151651": {
"content": "<|quad_end|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151652": {
"content": "<|vision_start|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151653": {
"content": "<|vision_end|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151654": {
"content": "<|vision_pad|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151655": {
"content": "<|image_pad|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151656": {
"content": "<|video_pad|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": true
},
"151657": {
"content": "<tool_call>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"151658": {
"content": "</tool_call>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"151659": {
"content": "<|fim_prefix|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"151660": {
"content": "<|fim_middle|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"151661": {
"content": "<|fim_suffix|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"151662": {
"content": "<|fim_pad|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"151663": {
"content": "<|repo_name|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
},
"151664": {
"content": "<|file_sep|>",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
"special": false
}
},
"additional_special_tokens": [
"<|im_start|>",
"<|im_end|>",
"<|object_ref_start|>",
"<|object_ref_end|>",
"<|box_start|>",
"<|box_end|>",
"<|quad_start|>",
"<|quad_end|>",
"<|vision_start|>",
"<|vision_end|>",
"<|vision_pad|>",
"<|image_pad|>",
"<|video_pad|>"
],
"bos_token": null,
"chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
"clean_up_tokenization_spaces": false,
"eos_token": "<|endoftext|>",
"errors": "replace",
"model_max_length": 131072,
"pad_token": "<|endoftext|>",
"padding_side": "right",
"split_special_tokens": false,
"tokenizer_class": "Qwen2Tokenizer",
"unk_token": null
}
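
The chat_template above renders ChatML-style turns, with optional <tools> signatures and <tool_call> blocks for function calling. A minimal sketch of applying it through the tokenizer (hypothetical local path):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./checkpoint-120")  # hypothetical path

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
# Renders <|im_start|>role ... <|im_end|> turns plus the assistant header.
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)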


@@ -0,0 +1,873 @@
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.5,
"eval_steps": 500,
"global_step": 120,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0125,
"grad_norm": 2.691509962081909,
"learning_rate": 0.00015998457923856519,
"loss": 1.4456,
"step": 1
},
{
"epoch": 0.025,
"grad_norm": 24.26286506652832,
"learning_rate": 0.00015993832289925785,
"loss": 4.3349,
"step": 2
},
{
"epoch": 0.0375,
"grad_norm": 13.163036346435547,
"learning_rate": 0.0001598612488147773,
"loss": 2.6695,
"step": 3
},
{
"epoch": 0.05,
"grad_norm": 9.818785667419434,
"learning_rate": 0.00015975338669865026,
"loss": 2.3799,
"step": 4
},
{
"epoch": 0.0625,
"grad_norm": 6.200242519378662,
"learning_rate": 0.00015961477813377576,
"loss": 2.0935,
"step": 5
},
{
"epoch": 0.075,
"grad_norm": 2.0556814670562744,
"learning_rate": 0.00015944547655639412,
"loss": 1.8465,
"step": 6
},
{
"epoch": 0.0875,
"grad_norm": 2.5195746421813965,
"learning_rate": 0.00015924554723548617,
"loss": 1.7321,
"step": 7
},
{
"epoch": 0.1,
"grad_norm": 4.300451278686523,
"learning_rate": 0.00015901506724761103,
"loss": 1.7284,
"step": 8
},
{
"epoch": 0.1125,
"grad_norm": 1.5021892786026,
"learning_rate": 0.00015875412544719134,
"loss": 1.5971,
"step": 9
},
{
"epoch": 0.125,
"grad_norm": 1.5246820449829102,
"learning_rate": 0.00015846282243225845,
"loss": 1.562,
"step": 10
},
{
"epoch": 0.1375,
"grad_norm": 2.0095787048339844,
"learning_rate": 0.0001581412705056698,
"loss": 1.578,
"step": 11
},
{
"epoch": 0.15,
"grad_norm": 0.9773982763290405,
"learning_rate": 0.00015778959363181415,
"loss": 1.4977,
"step": 12
},
{
"epoch": 0.1625,
"grad_norm": 1.1493251323699951,
"learning_rate": 0.0001574079273888208,
"loss": 1.5075,
"step": 13
},
{
"epoch": 0.175,
"grad_norm": 0.8909309506416321,
"learning_rate": 0.00015699641891629178,
"loss": 1.4158,
"step": 14
},
{
"epoch": 0.1875,
"grad_norm": 0.9415439963340759,
"learning_rate": 0.00015655522685857672,
"loss": 1.4219,
"step": 15
},
{
"epoch": 0.2,
"grad_norm": 1.1703603267669678,
"learning_rate": 0.0001560845213036123,
"loss": 1.4006,
"step": 16
},
{
"epoch": 0.2125,
"grad_norm": 0.7575011849403381,
"learning_rate": 0.00015558448371735025,
"loss": 1.3675,
"step": 17
},
{
"epoch": 0.225,
"grad_norm": 0.6772542595863342,
"learning_rate": 0.00015505530687379875,
"loss": 1.3369,
"step": 18
},
{
"epoch": 0.2375,
"grad_norm": 0.5587411522865295,
"learning_rate": 0.00015449719478070428,
"loss": 1.3632,
"step": 19
},
{
"epoch": 0.25,
"grad_norm": 0.5920618772506714,
"learning_rate": 0.00015391036260090294,
"loss": 1.3511,
"step": 20
},
{
"epoch": 0.2625,
"grad_norm": 0.4218953847885132,
"learning_rate": 0.0001532950365693709,
"loss": 1.3641,
"step": 21
},
{
"epoch": 0.275,
"grad_norm": 0.4676741361618042,
"learning_rate": 0.00015265145390600652,
"loss": 1.3441,
"step": 22
},
{
"epoch": 0.2875,
"grad_norm": 0.38095250725746155,
"learning_rate": 0.00015197986272417774,
"loss": 1.3418,
"step": 23
},
{
"epoch": 0.3,
"grad_norm": 0.42308753728866577,
"learning_rate": 0.00015128052193506944,
"loss": 1.3646,
"step": 24
},
{
"epoch": 0.3125,
"grad_norm": 0.4307089149951935,
"learning_rate": 0.0001505537011478684,
"loss": 1.2992,
"step": 25
},
{
"epoch": 0.325,
"grad_norm": 0.33103814721107483,
"learning_rate": 0.0001497996805658238,
"loss": 1.3435,
"step": 26
},
{
"epoch": 0.3375,
"grad_norm": 0.3511773645877838,
"learning_rate": 0.00014901875087822337,
"loss": 1.3,
"step": 27
},
{
"epoch": 0.35,
"grad_norm": 0.2914850115776062,
"learning_rate": 0.0001482112131483274,
"loss": 1.3103,
"step": 28
},
{
"epoch": 0.3625,
"grad_norm": 0.37050625681877136,
"learning_rate": 0.00014737737869730292,
"loss": 1.2731,
"step": 29
},
{
"epoch": 0.375,
"grad_norm": 0.3476356565952301,
"learning_rate": 0.00014651756898420365,
"loss": 1.3211,
"step": 30
},
{
"epoch": 0.3875,
"grad_norm": 0.27799472212791443,
"learning_rate": 0.0001456321154820411,
"loss": 1.2657,
"step": 31
},
{
"epoch": 0.4,
"grad_norm": 0.318327397108078,
"learning_rate": 0.00014472135954999581,
"loss": 1.3068,
"step": 32
},
{
"epoch": 0.4125,
"grad_norm": 0.30465707182884216,
"learning_rate": 0.00014378565230181657,
"loss": 1.2839,
"step": 33
},
{
"epoch": 0.425,
"grad_norm": 0.2618834376335144,
"learning_rate": 0.0001428253544704596,
"loss": 1.2868,
"step": 34
},
{
"epoch": 0.4375,
"grad_norm": 0.2864656150341034,
"learning_rate": 0.00014184083626901897,
"loss": 1.2815,
"step": 35
},
{
"epoch": 0.45,
"grad_norm": 0.2776831388473511,
"learning_rate": 0.0001408324772480025,
"loss": 1.2895,
"step": 36
},
{
"epoch": 0.4625,
"grad_norm": 0.31238630414009094,
"learning_rate": 0.00013980066614900776,
"loss": 1.2718,
"step": 37
},
{
"epoch": 0.475,
"grad_norm": 0.23365426063537598,
"learning_rate": 0.00013874580075485485,
"loss": 1.2596,
"step": 38
},
{
"epoch": 0.4875,
"grad_norm": 0.23924365639686584,
"learning_rate": 0.00013766828773623352,
"loss": 1.2809,
"step": 39
},
{
"epoch": 0.5,
"grad_norm": 0.24298632144927979,
"learning_rate": 0.00013656854249492382,
"loss": 1.2248,
"step": 40
},
{
"epoch": 0.5125,
"grad_norm": 0.25117772817611694,
"learning_rate": 0.0001354469890036509,
"loss": 1.2653,
"step": 41
},
{
"epoch": 0.525,
"grad_norm": 0.25377020239830017,
"learning_rate": 0.00013430405964263536,
"loss": 1.2687,
"step": 42
},
{
"epoch": 0.5375,
"grad_norm": 0.24669994413852692,
"learning_rate": 0.00013314019503290255,
"loss": 1.269,
"step": 43
},
{
"epoch": 0.55,
"grad_norm": 0.22006134688854218,
"learning_rate": 0.00013195584386641469,
"loss": 1.2559,
"step": 44
},
{
"epoch": 0.5625,
"grad_norm": 0.2517986595630646,
"learning_rate": 0.00013075146273309164,
"loss": 1.2477,
"step": 45
},
{
"epoch": 0.575,
"grad_norm": 0.21466796100139618,
"learning_rate": 0.00012952751594478675,
"loss": 1.2358,
"step": 46
},
{
"epoch": 0.5875,
"grad_norm": 0.2188994437456131,
"learning_rate": 0.0001282844753562857,
"loss": 1.2444,
"step": 47
},
{
"epoch": 0.6,
"grad_norm": 2.4198501110076904,
"learning_rate": 0.00012702282018339786,
"loss": 1.2535,
"step": 48
},
{
"epoch": 0.6125,
"grad_norm": 0.3393913209438324,
"learning_rate": 0.00012574303681820898,
"loss": 1.2361,
"step": 49
},
{
"epoch": 0.625,
"grad_norm": 0.32384437322616577,
"learning_rate": 0.0001244456186415682,
"loss": 1.2283,
"step": 50
},
{
"epoch": 0.6375,
"grad_norm": 0.3469082713127136,
"learning_rate": 0.00012313106583288004,
"loss": 1.2401,
"step": 51
},
{
"epoch": 0.65,
"grad_norm": 0.42606261372566223,
"learning_rate": 0.00012179988517727591,
"loss": 1.2399,
"step": 52
},
{
"epoch": 0.6625,
"grad_norm": 0.4077642261981964,
"learning_rate": 0.00012045258987023879,
"loss": 1.2441,
"step": 53
},
{
"epoch": 0.675,
"grad_norm": 0.3077225089073181,
"learning_rate": 0.00011908969931975641,
"loss": 1.253,
"step": 54
},
{
"epoch": 0.6875,
"grad_norm": 0.9925752878189087,
"learning_rate": 0.00011771173894607985,
"loss": 1.2586,
"step": 55
},
{
"epoch": 0.7,
"grad_norm": 1.9072725772857666,
"learning_rate": 0.00011631923997916375,
"loss": 1.2643,
"step": 56
},
{
"epoch": 0.7125,
"grad_norm": 0.5788567662239075,
"learning_rate": 0.00011491273925386736,
"loss": 1.2657,
"step": 57
},
{
"epoch": 0.725,
"grad_norm": 0.9417564868927002,
"learning_rate": 0.00011349277900299426,
"loss": 1.2526,
"step": 58
},
{
"epoch": 0.7375,
"grad_norm": 0.9247767329216003,
"learning_rate": 0.00011205990664825127,
"loss": 1.2402,
"step": 59
},
{
"epoch": 0.75,
"grad_norm": 0.5092797875404358,
"learning_rate": 0.00011061467458920719,
"loss": 1.2264,
"step": 60
},
{
"epoch": 0.7625,
"grad_norm": 0.8128093481063843,
"learning_rate": 0.00010915763999033201,
"loss": 1.22,
"step": 61
},
{
"epoch": 0.775,
"grad_norm": 0.4954143166542053,
"learning_rate": 0.00010768936456619945,
"loss": 1.203,
"step": 62
},
{
"epoch": 0.7875,
"grad_norm": 0.7117099761962891,
"learning_rate": 0.0001062104143649355,
"loss": 1.2295,
"step": 63
},
{
"epoch": 0.8,
"grad_norm": 0.5060359835624695,
"learning_rate": 0.0001047213595499958,
"loss": 1.1936,
"step": 64
},
{
"epoch": 0.8125,
"grad_norm": 0.5212268829345703,
"learning_rate": 0.000103222774180357,
"loss": 1.1927,
"step": 65
},
{
"epoch": 0.825,
"grad_norm": 0.47975900769233704,
"learning_rate": 0.00010171523598920594,
"loss": 1.2116,
"step": 66
},
{
"epoch": 0.8375,
"grad_norm": 0.3655720055103302,
"learning_rate": 0.00010019932616121264,
"loss": 1.2002,
"step": 67
},
{
"epoch": 0.85,
"grad_norm": 0.38993576169013977,
"learning_rate": 9.867562910847246e-05,
"loss": 1.2225,
"step": 68
},
{
"epoch": 0.8625,
"grad_norm": 0.33190780878067017,
"learning_rate": 9.714473224520406e-05,
"loss": 1.1982,
"step": 69
},
{
"epoch": 0.875,
"grad_norm": 0.3178853988647461,
"learning_rate": 9.560722576129029e-05,
"loss": 1.2015,
"step": 70
},
{
"epoch": 0.8875,
"grad_norm": 0.28483396768569946,
"learning_rate": 9.406370239474839e-05,
"loss": 1.2013,
"step": 71
},
{
"epoch": 0.9,
"grad_norm": 0.26456528902053833,
"learning_rate": 9.251475720321848e-05,
"loss": 1.2101,
"step": 72
},
{
"epoch": 0.9125,
"grad_norm": 0.24198457598686218,
"learning_rate": 9.096098733455746e-05,
"loss": 1.1889,
"step": 73
},
{
"epoch": 0.925,
"grad_norm": 0.2521977424621582,
"learning_rate": 8.940299179662703e-05,
"loss": 1.1915,
"step": 74
},
{
"epoch": 0.9375,
"grad_norm": 0.22842273116111755,
"learning_rate": 8.784137122636488e-05,
"loss": 1.2018,
"step": 75
},
{
"epoch": 0.95,
"grad_norm": 0.21817852556705475,
"learning_rate": 8.627672765822762e-05,
"loss": 1.188,
"step": 76
},
{
"epoch": 0.9625,
"grad_norm": 0.1990320086479187,
"learning_rate": 8.470966429209512e-05,
"loss": 1.1821,
"step": 77
},
{
"epoch": 0.975,
"grad_norm": 0.20685255527496338,
"learning_rate": 8.31407852607255e-05,
"loss": 1.1687,
"step": 78
},
{
"epoch": 0.9875,
"grad_norm": 0.20527754724025726,
"learning_rate": 8.157069539685026e-05,
"loss": 1.2024,
"step": 79
},
{
"epoch": 1.0,
"grad_norm": 0.20712077617645264,
"learning_rate": 8e-05,
"loss": 1.1721,
"step": 80
},
{
"epoch": 1.0125,
"grad_norm": 0.2558927536010742,
"learning_rate": 7.842930460314975e-05,
"loss": 1.2239,
"step": 81
},
{
"epoch": 1.025,
"grad_norm": 0.25813376903533936,
"learning_rate": 7.685921473927454e-05,
"loss": 1.2615,
"step": 82
},
{
"epoch": 1.0375,
"grad_norm": 0.22628559172153473,
"learning_rate": 7.529033570790488e-05,
"loss": 1.2229,
"step": 83
},
{
"epoch": 1.05,
"grad_norm": 0.19325922429561615,
"learning_rate": 7.372327234177242e-05,
"loss": 1.2115,
"step": 84
},
{
"epoch": 1.0625,
"grad_norm": 0.21385957300662994,
"learning_rate": 7.215862877363515e-05,
"loss": 1.2484,
"step": 85
},
{
"epoch": 1.075,
"grad_norm": 0.1882706731557846,
"learning_rate": 7.0597008203373e-05,
"loss": 1.2583,
"step": 86
},
{
"epoch": 1.0875,
"grad_norm": 0.1963510811328888,
"learning_rate": 6.903901266544258e-05,
"loss": 1.2448,
"step": 87
},
{
"epoch": 1.1,
"grad_norm": 0.18707427382469177,
"learning_rate": 6.748524279678152e-05,
"loss": 1.2694,
"step": 88
},
{
"epoch": 1.1125,
"grad_norm": 0.17115652561187744,
"learning_rate": 6.593629760525164e-05,
"loss": 1.2213,
"step": 89
},
{
"epoch": 1.125,
"grad_norm": 0.17526257038116455,
"learning_rate": 6.439277423870975e-05,
"loss": 1.2448,
"step": 90
},
{
"epoch": 1.1375,
"grad_norm": 0.17824630439281464,
"learning_rate": 6.285526775479596e-05,
"loss": 1.2967,
"step": 91
},
{
"epoch": 1.15,
"grad_norm": 0.17490257322788239,
"learning_rate": 6.13243708915276e-05,
"loss": 1.2426,
"step": 92
},
{
"epoch": 1.1625,
"grad_norm": 0.17836087942123413,
"learning_rate": 5.9800673838787364e-05,
"loss": 1.2834,
"step": 93
},
{
"epoch": 1.175,
"grad_norm": 0.165805846452713,
"learning_rate": 5.828476401079407e-05,
"loss": 1.2123,
"step": 94
},
{
"epoch": 1.1875,
"grad_norm": 0.16540847718715668,
"learning_rate": 5.677722581964303e-05,
"loss": 1.2358,
"step": 95
},
{
"epoch": 1.2,
"grad_norm": 0.15229956805706024,
"learning_rate": 5.5278640450004216e-05,
"loss": 1.2278,
"step": 96
},
{
"epoch": 1.2125,
"grad_norm": 0.1695357859134674,
"learning_rate": 5.3789585635064534e-05,
"loss": 1.2044,
"step": 97
},
{
"epoch": 1.225,
"grad_norm": 0.16060565412044525,
"learning_rate": 5.231063543380055e-05,
"loss": 1.1828,
"step": 98
},
{
"epoch": 1.2375,
"grad_norm": 0.15779414772987366,
"learning_rate": 5.084236000966803e-05,
"loss": 1.2169,
"step": 99
},
{
"epoch": 1.25,
"grad_norm": 0.14691977202892303,
"learning_rate": 4.9385325410792824e-05,
"loss": 1.2132,
"step": 100
},
{
"epoch": 1.2625,
"grad_norm": 0.15118764340877533,
"learning_rate": 4.794009335174874e-05,
"loss": 1.2336,
"step": 101
},
{
"epoch": 1.275,
"grad_norm": 0.13673175871372223,
"learning_rate": 4.650722099700578e-05,
"loss": 1.2196,
"step": 102
},
{
"epoch": 1.2875,
"grad_norm": 0.14001807570457458,
"learning_rate": 4.508726074613262e-05,
"loss": 1.2204,
"step": 103
},
{
"epoch": 1.3,
"grad_norm": 0.14354203641414642,
"learning_rate": 4.3680760020836266e-05,
"loss": 1.2468,
"step": 104
},
{
"epoch": 1.3125,
"grad_norm": 0.1509067714214325,
"learning_rate": 4.2288261053920186e-05,
"loss": 1.1899,
"step": 105
},
{
"epoch": 1.325,
"grad_norm": 0.14659079909324646,
"learning_rate": 4.0910300680243636e-05,
"loss": 1.2373,
"step": 106
},
{
"epoch": 1.3375,
"grad_norm": 0.15252433717250824,
"learning_rate": 3.954741012976125e-05,
"loss": 1.1971,
"step": 107
},
{
"epoch": 1.35,
"grad_norm": 0.1317344456911087,
"learning_rate": 3.8200114822724096e-05,
"loss": 1.2109,
"step": 108
},
{
"epoch": 1.3625,
"grad_norm": 0.1382139027118683,
"learning_rate": 3.686893416711998e-05,
"loss": 1.1777,
"step": 109
},
{
"epoch": 1.375,
"grad_norm": 0.13280591368675232,
"learning_rate": 3.5554381358431845e-05,
"loss": 1.2271,
"step": 110
},
{
"epoch": 1.3875,
"grad_norm": 0.13518379628658295,
"learning_rate": 3.425696318179103e-05,
"loss": 1.1753,
"step": 111
},
{
"epoch": 1.4,
"grad_norm": 0.11688841879367828,
"learning_rate": 3.297717981660216e-05,
"loss": 1.216,
"step": 112
},
{
"epoch": 1.4125,
"grad_norm": 0.12823420763015747,
"learning_rate": 3.1715524643714286e-05,
"loss": 1.1956,
"step": 113
},
{
"epoch": 1.425,
"grad_norm": 0.13634033501148224,
"learning_rate": 3.0472484055213276e-05,
"loss": 1.2017,
"step": 114
},
{
"epoch": 1.4375,
"grad_norm": 0.11677446216344833,
"learning_rate": 2.9248537266908373e-05,
"loss": 1.1977,
"step": 115
},
{
"epoch": 1.45,
"grad_norm": 0.12495961785316467,
"learning_rate": 2.804415613358532e-05,
"loss": 1.2068,
"step": 116
},
{
"epoch": 1.4625,
"grad_norm": 0.11066638678312302,
"learning_rate": 2.685980496709749e-05,
"loss": 1.1919,
"step": 117
},
{
"epoch": 1.475,
"grad_norm": 0.1103682890534401,
"learning_rate": 2.569594035736466e-05,
"loss": 1.1824,
"step": 118
},
{
"epoch": 1.4875,
"grad_norm": 0.11392944306135178,
"learning_rate": 2.4553010996349143e-05,
"loss": 1.2038,
"step": 119
},
{
"epoch": 1.5,
"grad_norm": 0.10885628312826157,
"learning_rate": 2.3431457505076205e-05,
"loss": 1.1529,
"step": 120
}
],
"logging_steps": 1,
"max_steps": 160,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 40,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 3.957096134517719e+18,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
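
trainer_state.json is plain JSON, so the loss and learning-rate trajectory in log_history (120 of 160 steps, epoch 1.5, LR decaying from ~1.6e-4) can be inspected directly. A short sketch; the path below is inferred from the checkpoint-120/ entries in this commit, not named in this view:

import json

# Hypothetical path, inferred from the checkpoint-120/ entries above.
with open("checkpoint-120/trainer_state.json") as fh:
    state = json.load(fh)

losses = [(e["step"], e["loss"]) for e in state["log_history"]]
print(losses[0], losses[-1])            # (1, 1.4456) ... (120, 1.1529)
print(min(losses, key=lambda t: t[1]))  # lowest logged training loss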


@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7cff1c9b40ea170e068b9bfd9617336a994fa2abe18f52f6fecb0b4eebec4823
size 7544

File diff suppressed because one or more lines are too long


@@ -0,0 +1,604 @@
#!/usr/bin/env python
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
# This script extracts fp32 consolidated weights from ZeRO stage 1, 2 and 3 DeepSpeed checkpoints. It gets
# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
# application.
#
# example: python zero_to_fp32.py . pytorch_model.bin
import argparse
import torch
import glob
import math
import os
import re
from collections import OrderedDict
from dataclasses import dataclass
# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
# DeepSpeed data structures it has to be available in the current python environment.
from deepspeed.utils import logger
from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
@dataclass
class zero_model_state:
    buffers: dict
    param_shapes: dict
    shared_params: list
    ds_version: int
    frozen_param_shapes: dict
    frozen_param_fragments: dict
debug = 0
# load to cpu
device = torch.device('cpu')
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
'''
alist.sort(key=natural_keys) sorts in human order
http://nedbatchelder.com/blog/200712/human_sorting.html
(See Toothy's implementation in the comments)
'''
return [atoi(c) for c in re.split(r'(\d+)', text)]
def get_model_state_file(checkpoint_dir, zero_stage):
if not os.path.isdir(checkpoint_dir):
raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
# there should be only one file
if zero_stage <= 2:
file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
elif zero_stage == 3:
file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
if not os.path.exists(file):
raise FileNotFoundError(f"can't find model states file at '{file}'")
return file
def get_checkpoint_files(checkpoint_dir, glob_pattern):
# XXX: need to test that this simple glob rule works for multi-node setup too
ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
if len(ckpt_files) == 0:
raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
return ckpt_files
def get_optim_files(checkpoint_dir):
return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
def get_model_state_files(checkpoint_dir):
return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
def parse_model_states(files):
zero_model_states = []
for file in files:
state_dict = torch.load(file, map_location=device)
if BUFFER_NAMES not in state_dict:
raise ValueError(f"{file} is not a model state checkpoint")
buffer_names = state_dict[BUFFER_NAMES]
if debug:
print("Found buffers:", buffer_names)
# recover just the buffers while restoring them to fp32 if they were saved in fp16
buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
param_shapes = state_dict[PARAM_SHAPES]
# collect parameters that are included in param_shapes
param_names = []
for s in param_shapes:
for name in s.keys():
param_names.append(name)
# update with frozen parameters
frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
if frozen_param_shapes is not None:
if debug:
print(f"Found frozen_param_shapes: {frozen_param_shapes}")
param_names += list(frozen_param_shapes.keys())
# handle shared params
shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
ds_version = state_dict.get(DS_VERSION, None)
frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
z_model_state = zero_model_state(buffers=buffers,
param_shapes=param_shapes,
shared_params=shared_params,
ds_version=ds_version,
frozen_param_shapes=frozen_param_shapes,
frozen_param_fragments=frozen_param_fragments)
zero_model_states.append(z_model_state)
return zero_model_states
def parse_optim_states(files, ds_checkpoint_dir):
total_files = len(files)
state_dicts = []
for f in files:
state_dict = torch.load(f, map_location=device)
        # immediately discard the two potentially huge optimizer states, as we only care about the fp32 master weights
# and also handle the case where it was already removed by another helper script
state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
state_dicts.append(state_dict)
    if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
raise ValueError(f"{files[0]} is not a zero checkpoint")
zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
# For ZeRO-2 each param group can have different partition_count as data parallelism for expert
# parameters can be different from data parallelism for non-expert parameters. So we can just
# use the max of the partition_count to get the dp world_size.
if type(world_size) is list:
world_size = max(world_size)
if world_size != total_files:
raise ValueError(
f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
"Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
)
# the groups are named differently in each stage
if zero_stage <= 2:
fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
elif zero_stage == 3:
fp32_groups_key = FP32_FLAT_GROUPS
else:
raise ValueError(f"unknown zero stage {zero_stage}")
if zero_stage <= 2:
fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
elif zero_stage == 3:
# if there is more than one param group, there will be multiple flattened tensors - one
# flattened tensor per group - for simplicity merge them into a single tensor
#
# XXX: could make the script more memory efficient for when there are multiple groups - it
# will require matching the sub-lists of param_shapes for each param group flattened tensor
fp32_flat_groups = [
torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
]
return zero_stage, world_size, fp32_flat_groups
def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
"""
Returns fp32 state_dict reconstructed from ds checkpoint
Args:
- ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
"""
print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
optim_files = get_optim_files(ds_checkpoint_dir)
zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
model_files = get_model_state_files(ds_checkpoint_dir)
zero_model_states = parse_model_states(model_files)
print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
if zero_stage <= 2:
return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
exclude_frozen_parameters)
elif zero_stage == 3:
return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
exclude_frozen_parameters)
def _zero2_merge_frozen_params(state_dict, zero_model_states):
if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
return
frozen_param_shapes = zero_model_states[0].frozen_param_shapes
frozen_param_fragments = zero_model_states[0].frozen_param_fragments
if debug:
num_elem = sum(s.numel() for s in frozen_param_shapes.values())
print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
wanted_params = len(frozen_param_shapes)
wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
print(f'Frozen params: Have {avail_numel} numels to process.')
print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
total_params = 0
total_numel = 0
for name, shape in frozen_param_shapes.items():
total_params += 1
unpartitioned_numel = shape.numel()
total_numel += unpartitioned_numel
state_dict[name] = frozen_param_fragments[name]
if debug:
print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
def _has_callable(obj, fn):
attr = getattr(obj, fn, None)
return callable(attr)
def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
param_shapes = zero_model_states[0].param_shapes
# Reconstruction protocol:
#
# XXX: document this
if debug:
for i in range(world_size):
for j in range(len(fp32_flat_groups[0])):
print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
# XXX: memory usage doubles here (zero2)
num_param_groups = len(fp32_flat_groups[0])
merged_single_partition_of_fp32_groups = []
for i in range(num_param_groups):
merged_partitions = [sd[i] for sd in fp32_flat_groups]
full_single_fp32_vector = torch.cat(merged_partitions, 0)
merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
avail_numel = sum(
[full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
if debug:
wanted_params = sum([len(shapes) for shapes in param_shapes])
wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
# not asserting if there is a mismatch due to possible padding
print(f"Have {avail_numel} numels to process.")
print(f"Need {wanted_numel} numels in {wanted_params} params.")
# params
# XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
# out-of-core computing solution
total_numel = 0
total_params = 0
for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
offset = 0
avail_numel = full_single_fp32_vector.numel()
for name, shape in shapes.items():
unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
total_numel += unpartitioned_numel
total_params += 1
if debug:
print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
offset += unpartitioned_numel
# Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
# avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
# paddings performed in the code it's almost impossible to predict the exact numbers w/o the
# live optimizer object, so we are checking that the numbers are within the right range
align_to = 2 * world_size
def zero2_align(x):
return align_to * math.ceil(x / align_to)
if debug:
print(f"original offset={offset}, avail_numel={avail_numel}")
offset = zero2_align(offset)
avail_numel = zero2_align(avail_numel)
if debug:
print(f"aligned offset={offset}, avail_numel={avail_numel}")
# Sanity check
if offset != avail_numel:
raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
exclude_frozen_parameters):
state_dict = OrderedDict()
# buffers
buffers = zero_model_states[0].buffers
state_dict.update(buffers)
if debug:
print(f"added {len(buffers)} buffers")
if not exclude_frozen_parameters:
_zero2_merge_frozen_params(state_dict, zero_model_states)
_zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
# recover shared parameters
for pair in zero_model_states[0].shared_params:
if pair[1] in state_dict:
state_dict[pair[0]] = state_dict[pair[1]]
return state_dict
def zero3_partitioned_param_info(unpartitioned_numel, world_size):
remainder = unpartitioned_numel % world_size
padding_numel = (world_size - remainder) if remainder else 0
partitioned_numel = math.ceil(unpartitioned_numel / world_size)
return partitioned_numel, padding_numel
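# Worked example for the function above (illustrative numbers, not from this checkpoint):
#   unpartitioned_numel=10, world_size=4
#   remainder = 10 % 4 = 2  ->  padding_numel = 4 - 2 = 2
#   partitioned_numel = ceil(10 / 4) = 3; 3 elements * 4 ranks = 10 real + 2 padding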
def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
return
if debug:
for i in range(world_size):
num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
frozen_param_shapes = zero_model_states[0].frozen_param_shapes
wanted_params = len(frozen_param_shapes)
wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
print(f'Frozen params: Have {avail_numel} numels to process.')
print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
total_params = 0
total_numel = 0
for name, shape in zero_model_states[0].frozen_param_shapes.items():
total_params += 1
unpartitioned_numel = shape.numel()
total_numel += unpartitioned_numel
param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
if debug:
print(
f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
)
print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
param_shapes = zero_model_states[0].param_shapes
avail_numel = fp32_flat_groups[0].numel() * world_size
# Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
# param, re-consolidating each param, while dealing with padding if any
# merge list of dicts, preserving order
param_shapes = {k: v for d in param_shapes for k, v in d.items()}
if debug:
for i in range(world_size):
print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
wanted_params = len(param_shapes)
wanted_numel = sum(shape.numel() for shape in param_shapes.values())
# not asserting if there is a mismatch due to possible padding
avail_numel = fp32_flat_groups[0].numel() * world_size
print(f"Trainable params: Have {avail_numel} numels to process.")
print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
# params
# XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
# out-of-core computing solution
offset = 0
total_numel = 0
total_params = 0
for name, shape in param_shapes.items():
unpartitioned_numel = shape.numel()
total_numel += unpartitioned_numel
total_params += 1
partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
if debug:
print(
f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
)
# XXX: memory usage doubles here
state_dict[name] = torch.cat(
tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
0).narrow(0, 0, unpartitioned_numel).view(shape)
offset += partitioned_numel
offset *= world_size
# Sanity check
if offset != avail_numel:
raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
exclude_frozen_parameters):
state_dict = OrderedDict()
# buffers
buffers = zero_model_states[0].buffers
state_dict.update(buffers)
if debug:
print(f"added {len(buffers)} buffers")
if not exclude_frozen_parameters:
_zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
_zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
# recover shared parameters
for pair in zero_model_states[0].shared_params:
if pair[1] in state_dict:
state_dict[pair[0]] = state_dict[pair[1]]
return state_dict
def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None, exclude_frozen_parameters=False):
"""
Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
via a model hub.
Args:
- ``checkpoint_dir``: path to the desired checkpoint folder
    - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, the tag is read from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
- ``exclude_frozen_parameters``: exclude frozen parameters
Returns:
- pytorch ``state_dict``
Note: this approach may not work if your application doesn't have sufficient free CPU memory and
you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
the checkpoint.
A typical usage might be ::
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
# do the training and checkpoint saving
state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
model = model.cpu() # move to cpu
model.load_state_dict(state_dict)
# submit to model hub or save the model to share with others
In this example the ``model`` will no longer be usable in the deepspeed context of the same
application. i.e. you will need to re-initialize the deepspeed engine, since
``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
"""
if tag is None:
latest_path = os.path.join(checkpoint_dir, 'latest')
if os.path.isfile(latest_path):
with open(latest_path, 'r') as fd:
tag = fd.read().strip()
else:
raise ValueError(f"Unable to find 'latest' file at {latest_path}")
ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
if not os.path.isdir(ds_checkpoint_dir):
raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None, exclude_frozen_parameters=False):
"""
Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
Args:
- ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
- ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
- ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
- ``exclude_frozen_parameters``: exclude frozen parameters
"""
state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag, exclude_frozen_parameters)
print(f"Saving fp32 state dict to {output_file}")
torch.save(state_dict, output_file)
def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
"""
1. Put the provided model to cpu
2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
3. Load it into the provided model
Args:
- ``model``: the model object to update
- ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
- ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
Returns:
    - ``model``: modified model
Make sure you have plenty of CPU memory available before you call this function. If you don't
have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
conveniently placed for you in the checkpoint folder.
A typical usage might be ::
from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
# submit to model hub or save the model to share with others
Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context
of the same application. i.e. you will need to re-initialize the deepspeed engine, since
``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
"""
logger.info(f"Extracting fp32 weights")
state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
logger.info(f"Overwriting model with fp32 weights")
model = model.cpu()
model.load_state_dict(state_dict, strict=False)
return model
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("checkpoint_dir",
type=str,
help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
parser.add_argument(
"output_file",
type=str,
help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
parser.add_argument("-t",
"--tag",
type=str,
default=None,
help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
args = parser.parse_args()
debug = args.debug
convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
args.output_file,
tag=args.tag,
exclude_frozen_parameters=args.exclude_frozen_parameters)
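# Example invocation for this commit's checkpoint layout (paths assumed, not
# verified against the repo): when -t is omitted, the tag is read from
# checkpoint-120/latest, which contains "global_step120".
#
#   python zero_to_fp32.py checkpoint-120 checkpoint-120/pytorch_model.bin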