Commit bb1fc54 · 0 Parent(s)
yongzx committed: Push to HF hub

Changed files:
- checkpoint-5000/config.json +38 -0
- checkpoint-5000/global_step5000/mp_rank_00_model_states.pt +3 -0
- checkpoint-5000/global_step5000/zero_pp_rank_0_mp_rank_00_optim_states.pt +3 -0
- checkpoint-5000/global_step5000/zero_pp_rank_1_mp_rank_00_optim_states.pt +3 -0
- checkpoint-5000/global_step5000/zero_pp_rank_2_mp_rank_00_optim_states.pt +3 -0
- checkpoint-5000/global_step5000/zero_pp_rank_3_mp_rank_00_optim_states.pt +3 -0
- checkpoint-5000/global_step5000/zero_pp_rank_4_mp_rank_00_optim_states.pt +3 -0
- checkpoint-5000/global_step5000/zero_pp_rank_5_mp_rank_00_optim_states.pt +3 -0
- checkpoint-5000/global_step5000/zero_pp_rank_6_mp_rank_00_optim_states.pt +3 -0
- checkpoint-5000/global_step5000/zero_pp_rank_7_mp_rank_00_optim_states.pt +3 -0
- checkpoint-5000/latest +1 -0
- checkpoint-5000/pytorch_model-00001-of-00002.bin +3 -0
- checkpoint-5000/pytorch_model-00002-of-00002.bin +3 -0
- checkpoint-5000/pytorch_model.bin.index.json +373 -0
- checkpoint-5000/rng_state_0.pth +3 -0
- checkpoint-5000/rng_state_1.pth +3 -0
- checkpoint-5000/rng_state_2.pth +3 -0
- checkpoint-5000/rng_state_3.pth +3 -0
- checkpoint-5000/rng_state_4.pth +3 -0
- checkpoint-5000/rng_state_5.pth +3 -0
- checkpoint-5000/rng_state_6.pth +3 -0
- checkpoint-5000/rng_state_7.pth +3 -0
- checkpoint-5000/special_tokens_map.json +1 -0
- checkpoint-5000/tokenizer.json +3 -0
- checkpoint-5000/tokenizer_config.json +1 -0
- checkpoint-5000/trainer_state.json +36 -0
- checkpoint-5000/training_args.bin +3 -0
- checkpoint-5000/zero_to_fp32.py +482 -0
checkpoint-5000/config.json
ADDED
@@ -0,0 +1,38 @@
+{
+  "_name_or_path": "bigscience/bloom-7b1",
+  "adapters": {
+    "adapters": {},
+    "config_map": {},
+    "fusion_config_map": {},
+    "fusions": {}
+  },
+  "apply_residual_connection_post_layernorm": false,
+  "architectures": [
+    "BloomForCausalLM"
+  ],
+  "attention_dropout": 0.0,
+  "attention_softmax_in_fp32": true,
+  "bias_dropout_fusion": true,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "hidden_dropout": 0.0,
+  "hidden_size": 4096,
+  "initializer_range": 0.02,
+  "layer_norm_epsilon": 1e-05,
+  "masked_softmax_fusion": true,
+  "model_type": "bloom",
+  "n_head": 32,
+  "n_inner": null,
+  "n_layer": 30,
+  "offset_alibi": 100,
+  "pad_token_id": 3,
+  "pretraining_tp": 1,
+  "skip_bias_add": true,
+  "skip_bias_add_qkv": false,
+  "slow_but_exact": false,
+  "torch_dtype": "float16",
+  "transformers_version": "4.20.0.dev0",
+  "unk_token_id": 0,
+  "use_cache": true,
+  "vocab_size": 250880
+}
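The checkpoint follows the standard transformers layout, so once the LFS payloads are fetched the config and sharded weights load directly. A minimal sketch (the local path is an assumption; the extra "adapters" block comes from the adapter-transformers fork, and vanilla transformers should simply carry it as an unused config attribute):

# Sketch: load this checkpoint with transformers (local path is an assumption).
from transformers import AutoConfig, AutoModelForCausalLM

ckpt = "checkpoint-5000"
config = AutoConfig.from_pretrained(ckpt)  # parses the config.json above
print(config.model_type, config.n_layer, config.n_head, config.hidden_size)
# -> bloom 30 32 4096

model = AutoModelForCausalLM.from_pretrained(ckpt)
# from_pretrained follows pytorch_model.bin.index.json to assemble both shards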
checkpoint-5000/global_step5000/mp_rank_00_model_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:be5f7239bf3e83ba0b02be1849b03a4e2c3169f8bd174c38361e804d3feaf3be
+size 14138151891
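Each large binary in this commit is stored as a Git LFS pointer: three plain-text lines giving the spec version, the sha256 object id, and the payload size in bytes. Summing the sizes listed below, the full checkpoint weighs roughly 127 GB (one 14.1 GB model-states file, eight ~10.6 GB optimizer shards, and two ~14.1 GB fp16 model shards). An illustrative helper, not part of the repository, that reads a pointer file before the payload has been smudged:

# Illustrative: parse the "key value" lines of a Git LFS pointer file.
def parse_lfs_pointer(path):
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

ptr = parse_lfs_pointer(
    "checkpoint-5000/global_step5000/mp_rank_00_model_states.pt")
print(ptr["oid"], int(ptr["size"]))  # sha256:be5f72... 14138151891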
checkpoint-5000/global_step5000/zero_pp_rank_0_mp_rank_00_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e201105c48d454f06772019e33b7e55dd634959276074751e976a61ba843889c
+size 10603527171
checkpoint-5000/global_step5000/zero_pp_rank_1_mp_rank_00_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:10fa04d5481b42d2386fd3633a064d637268ce98a60224a4456aeb5be64e2f7a
+size 10603530883
checkpoint-5000/global_step5000/zero_pp_rank_2_mp_rank_00_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:61565040a4d15d3378471e5308b56380abd3efbbc851c03dad52adba62e266b9
+size 10603531139
checkpoint-5000/global_step5000/zero_pp_rank_3_mp_rank_00_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:78f637d4c5a735da81004c0970aebb6d050370227b6208eb53f7d145dfa9f4df
+size 10603531331
checkpoint-5000/global_step5000/zero_pp_rank_4_mp_rank_00_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c95a9fc5ac8daeec862d02ef86dbf858ea95872936394f08f10ef19574d401a0
+size 10603531011
checkpoint-5000/global_step5000/zero_pp_rank_5_mp_rank_00_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3359b4c339f843e9ef9ef02f9eb2a260576e2ea3bb99bcb69137934e46dd365c
+size 10603531203
checkpoint-5000/global_step5000/zero_pp_rank_6_mp_rank_00_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:86d00774a1a6802f1b78a9c40e81e152bfa405946b6fccfc9ef51678f438e4a5
+size 10603531395
checkpoint-5000/global_step5000/zero_pp_rank_7_mp_rank_00_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eae351ba0af864a423591472f5172627436f601b6dec6d3783d64e1be3341b2c
+size 10603531203
checkpoint-5000/latest
ADDED
@@ -0,0 +1 @@
+global_step5000
checkpoint-5000/pytorch_model-00001-of-00002.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e800a4cff1ea0940cd566c1350783f3aa9ae42b1ae34c2f868f9f4dfad2bac65
+size 14138065939
checkpoint-5000/pytorch_model-00002-of-00002.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d965501ab024d192f1e74458df2eed53bb1dcb11486098e76f6c97430bcaeea8
+size 14138049747
checkpoint-5000/pytorch_model.bin.index.json
ADDED
@@ -0,0 +1,373 @@
+{
+  "metadata": {
+    "total_size": 16193241088
+  },
+  "weight_map": {
+    "lm_head.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.0.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.0.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.0.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.0.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.0.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.0.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.0.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.0.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.0.self_attention.dense.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.0.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.0.self_attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.0.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.1.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.1.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.1.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.1.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.1.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.1.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.1.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.1.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.1.self_attention.dense.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.1.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.1.self_attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.1.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.10.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.10.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.10.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.10.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.10.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.10.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.10.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.10.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.10.self_attention.dense.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.10.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.10.self_attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.10.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.11.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.11.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.11.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.11.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.11.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.11.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.11.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.11.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.11.self_attention.dense.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.11.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.11.self_attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.11.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.12.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.12.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.12.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.12.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.12.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.12.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.12.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.12.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.12.self_attention.dense.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.12.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.12.self_attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.12.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.13.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.13.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.13.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.13.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.13.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.13.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.13.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.13.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.13.self_attention.dense.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.13.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.13.self_attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.13.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.14.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.14.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.14.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.14.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.14.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.14.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.14.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.14.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.14.self_attention.dense.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.14.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.14.self_attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.14.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.15.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.15.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.15.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.15.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.15.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.15.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.15.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.15.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.15.self_attention.dense.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.15.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.15.self_attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.15.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.16.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.16.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.16.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.16.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.16.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.16.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.16.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.16.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.16.self_attention.dense.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.16.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.16.self_attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.16.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.17.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.17.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.17.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.17.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.17.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.17.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.17.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.17.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.17.self_attention.dense.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.17.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.17.self_attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.17.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.18.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.18.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.18.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.18.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.18.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.18.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.18.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.18.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.18.self_attention.dense.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.18.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.18.self_attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.18.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.19.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.19.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.19.mlp.dense_4h_to_h.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.19.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.19.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.19.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.19.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.19.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.19.self_attention.dense.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.19.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.19.self_attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.19.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.2.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.2.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.2.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.2.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.2.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.2.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.2.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.2.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.2.self_attention.dense.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.2.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.2.self_attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.2.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.20.input_layernorm.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.20.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.20.mlp.dense_4h_to_h.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.20.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.20.mlp.dense_h_to_4h.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.20.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.20.post_attention_layernorm.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.20.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.20.self_attention.dense.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.20.self_attention.dense.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.20.self_attention.query_key_value.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.20.self_attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.21.input_layernorm.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.21.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.21.mlp.dense_4h_to_h.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.21.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.21.mlp.dense_h_to_4h.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.21.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.21.post_attention_layernorm.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.21.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.21.self_attention.dense.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.21.self_attention.dense.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.21.self_attention.query_key_value.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.21.self_attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.22.input_layernorm.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.22.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.22.mlp.dense_4h_to_h.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.22.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.22.mlp.dense_h_to_4h.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.22.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.22.post_attention_layernorm.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.22.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.22.self_attention.dense.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.22.self_attention.dense.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.22.self_attention.query_key_value.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.22.self_attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.23.input_layernorm.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.23.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.23.mlp.dense_4h_to_h.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.23.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.23.mlp.dense_h_to_4h.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.23.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.23.post_attention_layernorm.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.23.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.23.self_attention.dense.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.23.self_attention.dense.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.23.self_attention.query_key_value.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.23.self_attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.24.input_layernorm.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.24.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.24.mlp.dense_4h_to_h.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.24.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.24.mlp.dense_h_to_4h.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.24.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.24.post_attention_layernorm.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.24.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.24.self_attention.dense.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.24.self_attention.dense.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.24.self_attention.query_key_value.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.24.self_attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.25.input_layernorm.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.25.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.25.mlp.dense_4h_to_h.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.25.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.25.mlp.dense_h_to_4h.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.25.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.25.post_attention_layernorm.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.25.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.25.self_attention.dense.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.25.self_attention.dense.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.25.self_attention.query_key_value.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.25.self_attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.26.input_layernorm.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.26.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.26.mlp.dense_4h_to_h.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.26.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.26.mlp.dense_h_to_4h.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.26.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.26.post_attention_layernorm.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.26.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.26.self_attention.dense.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.26.self_attention.dense.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.26.self_attention.query_key_value.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.26.self_attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.27.input_layernorm.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.27.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.27.mlp.dense_4h_to_h.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.27.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.27.mlp.dense_h_to_4h.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.27.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.27.post_attention_layernorm.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.27.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.27.self_attention.dense.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.27.self_attention.dense.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.27.self_attention.query_key_value.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.27.self_attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.28.input_layernorm.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.28.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.28.mlp.dense_4h_to_h.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.28.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.28.mlp.dense_h_to_4h.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.28.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.28.post_attention_layernorm.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.28.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.28.self_attention.dense.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.28.self_attention.dense.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.28.self_attention.query_key_value.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.28.self_attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.29.input_layernorm.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.29.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.29.mlp.dense_4h_to_h.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.29.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.29.mlp.dense_h_to_4h.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.29.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.29.post_attention_layernorm.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.29.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.29.self_attention.dense.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.29.self_attention.dense.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.29.self_attention.query_key_value.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.29.self_attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.h.3.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.3.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.3.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.3.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.3.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.3.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.3.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.3.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.3.self_attention.dense.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.3.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.3.self_attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.3.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.4.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.4.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.4.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.4.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.4.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.4.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.4.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.4.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.4.self_attention.dense.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.4.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.4.self_attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.4.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.5.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.5.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.5.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.5.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.5.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.5.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.5.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.5.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.5.self_attention.dense.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.5.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.5.self_attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.5.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.6.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.6.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.6.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.6.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.6.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.6.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.6.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.6.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.6.self_attention.dense.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.6.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.6.self_attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.6.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.7.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.7.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.7.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.7.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.7.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.7.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.7.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.7.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.7.self_attention.dense.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.7.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.7.self_attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.7.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.8.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.8.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.8.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.8.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.8.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.8.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.8.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.8.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.8.self_attention.dense.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.8.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.8.self_attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.8.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.9.input_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.9.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.9.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.9.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.9.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.9.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.9.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.9.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.9.self_attention.dense.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.9.self_attention.dense.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.9.self_attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.h.9.self_attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.ln_f.bias": "pytorch_model-00002-of-00002.bin",
+    "transformer.ln_f.weight": "pytorch_model-00002-of-00002.bin",
+    "transformer.word_embeddings.weight": "pytorch_model-00001-of-00002.bin",
+    "transformer.word_embeddings_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+    "transformer.word_embeddings_layernorm.weight": "pytorch_model-00001-of-00002.bin"
+  }
+}
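The index maps every parameter name to the shard that holds it; transformers reads it automatically, but it is plain JSON and easy to query by hand. A short sketch, assuming the checkpoint directory is local:

# Look up which shard stores a given parameter (illustrative).
import json

with open("checkpoint-5000/pytorch_model.bin.index.json") as f:
    index = json.load(f)

print(index["metadata"]["total_size"])                 # 16193241088 bytes
print(index["weight_map"]["transformer.ln_f.weight"])  # pytorch_model-00002-of-00002.bin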
checkpoint-5000/rng_state_0.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:04b579140bfcedbfb111291cf28bb2124d6e4bc8025c94f735dc9bbfa358f7c0
+size 14503
checkpoint-5000/rng_state_1.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:04b579140bfcedbfb111291cf28bb2124d6e4bc8025c94f735dc9bbfa358f7c0
+size 14503
checkpoint-5000/rng_state_2.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:04b579140bfcedbfb111291cf28bb2124d6e4bc8025c94f735dc9bbfa358f7c0
+size 14503
checkpoint-5000/rng_state_3.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:04b579140bfcedbfb111291cf28bb2124d6e4bc8025c94f735dc9bbfa358f7c0
+size 14503
checkpoint-5000/rng_state_4.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:04b579140bfcedbfb111291cf28bb2124d6e4bc8025c94f735dc9bbfa358f7c0
+size 14503
checkpoint-5000/rng_state_5.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:04b579140bfcedbfb111291cf28bb2124d6e4bc8025c94f735dc9bbfa358f7c0
+size 14503
checkpoint-5000/rng_state_6.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:04b579140bfcedbfb111291cf28bb2124d6e4bc8025c94f735dc9bbfa358f7c0
+size 14503
checkpoint-5000/rng_state_7.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:04b579140bfcedbfb111291cf28bb2124d6e4bc8025c94f735dc9bbfa358f7c0
+size 14503
checkpoint-5000/special_tokens_map.json
ADDED
@@ -0,0 +1 @@
+{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
checkpoint-5000/tokenizer.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8f6efc66e73f1fd69da4f436e48befb519fdff3fe18910850c1d41bd862293a5
+size 14500443
checkpoint-5000/tokenizer_config.json
ADDED
@@ -0,0 +1 @@
+{"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "<pad>", "add_prefix_space": false, "name_or_path": "bigscience/bloom-7b1", "special_tokens_map_file": null, "padding_side": "left", "tokenizer_class": "BloomTokenizer"}
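Note that padding_side is set to "left", the usual choice for batched causal-LM generation. A quick check, again assuming a local copy of the checkpoint directory:

# Load the saved tokenizer and confirm the left padding configured above.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("checkpoint-5000")
assert tok.padding_side == "left"
print(tok.pad_token, tok.eos_token)  # <pad> </s>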
checkpoint-5000/trainer_state.json
ADDED
@@ -0,0 +1,36 @@
+{
+  "best_metric": 2.775390625,
+  "best_model_checkpoint": "/home/zhengxinyong/outputs/bloom-7b1_de_continual-pretrain_100000samples_-1vocab_original/checkpoint-5000",
+  "epoch": 0.5495109352676119,
+  "global_step": 5000,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.27,
+      "learning_rate": 0.0001,
+      "loss": 3.0311,
+      "step": 2500
+    },
+    {
+      "epoch": 0.55,
+      "learning_rate": 0.0001,
+      "loss": 2.7334,
+      "step": 5000
+    },
+    {
+      "epoch": 0.55,
+      "eval_loss": 2.775390625,
+      "eval_runtime": 12.1862,
+      "eval_samples_per_second": 65.976,
+      "eval_steps_per_second": 8.288,
+      "step": 5000
+    }
+  ],
+  "max_steps": 25000,
+  "num_train_epochs": 3,
+  "total_flos": 1.4847373463173202e+18,
+  "trial_name": null,
+  "trial_params": null
+}
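trainer_state.json carries the training curve for this run (loss 3.0311 at step 2500, 2.7334 at step 5000, and an eval loss of 2.7754 matching best_metric); it is plain JSON, so the history can be pulled out directly, as in this sketch:

# Read the logged training curve from trainer_state.json (illustrative).
import json

with open("checkpoint-5000/trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "loss" in entry:
        print(entry["step"], entry["loss"])  # 2500 3.0311 / 5000 2.7334
print(state["best_metric"])                  # 2.775390625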
checkpoint-5000/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a6c419972f4b56ec21c71afa39eb805af3cd86942e2c33ef1c8b084aa1d90d6d
+size 4399
checkpoint-5000/zero_to_fp32.py
ADDED
@@ -0,0 +1,482 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python
|
2 |
+
|
3 |
+
# This script extracts fp32 consolidated weights from a zero 2 and 3 DeepSpeed checkpoints. It gets
|
4 |
+
# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
|
5 |
+
# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
|
6 |
+
# application.
|
7 |
+
#
|
8 |
+
# example: python zero_to_fp32.py . pytorch_model.bin
|
9 |
+
|
10 |
+
import argparse
|
11 |
+
import torch
|
12 |
+
import glob
|
13 |
+
import math
|
14 |
+
import os
|
15 |
+
import re
|
16 |
+
from collections import OrderedDict
|
17 |
+
|
18 |
+
# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
|
19 |
+
# DeepSpeed data structures it has to be available in the current python environment.
|
20 |
+
from deepspeed.utils import logger
|
21 |
+
from deepspeed.checkpoint.constants import (DS_VERSION,
|
22 |
+
OPTIMIZER_STATE_DICT,
|
23 |
+
SINGLE_PARTITION_OF_FP32_GROUPS,
|
24 |
+
FP32_FLAT_GROUPS,
|
25 |
+
ZERO_STAGE,
|
26 |
+
PARTITION_COUNT,
|
27 |
+
PARAM_SHAPES,
|
28 |
+
BUFFER_NAMES)
|
29 |
+
|
30 |
+
debug = 0
|
31 |
+
|
32 |
+
# load to cpu
|
33 |
+
device = torch.device('cpu')
|
34 |
+
|
35 |
+
|
36 |
+
def atoi(text):
|
37 |
+
return int(text) if text.isdigit() else text
|
38 |
+
|
39 |
+
|
40 |
+
def natural_keys(text):
|
41 |
+
'''
|
42 |
+
alist.sort(key=natural_keys) sorts in human order
|
43 |
+
http://nedbatchelder.com/blog/200712/human_sorting.html
|
44 |
+
(See Toothy's implementation in the comments)
|
45 |
+
'''
|
46 |
+
return [atoi(c) for c in re.split(r'(\d+)', text)]
|
47 |
+
|
48 |
+
|
49 |
+
def get_model_state_file(checkpoint_dir, zero_stage):
|
50 |
+
if not os.path.isdir(checkpoint_dir):
|
51 |
+
raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
|
52 |
+
|
53 |
+
# there should be only one file
|
54 |
+
if zero_stage == 2:
|
55 |
+
file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
|
56 |
+
elif zero_stage == 3:
|
57 |
+
file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
|
58 |
+
|
59 |
+
if not os.path.exists(file):
|
60 |
+
raise FileNotFoundError(f"can't find model states file at '{file}'")
|
61 |
+
|
62 |
+
return file
|
63 |
+
|
64 |
+
|
65 |
+
def get_optim_files(checkpoint_dir):
|
66 |
+
# XXX: need to test that this simple glob rule works for multi-node setup too
|
67 |
+
optim_files = sorted(glob.glob(os.path.join(checkpoint_dir,
|
68 |
+
"*_optim_states.pt")),
|
69 |
+
key=natural_keys)
|
70 |
+
|
71 |
+
if len(optim_files) == 0:
|
72 |
+
raise FileNotFoundError(
|
73 |
+
f"can't find '*_optim_states.pt' files in directory '{checkpoint_dir}'")
|
74 |
+
|
75 |
+
return optim_files
|
76 |
+
|
77 |
+
|
78 |
+
def parse_model_state(file):
|
79 |
+
state_dict = torch.load(file, map_location=device)
|
80 |
+
|
81 |
+
if BUFFER_NAMES not in state_dict:
|
82 |
+
raise ValueError(f"{file} is not a model state checkpoint")
|
83 |
+
buffer_names = state_dict[BUFFER_NAMES]
|
84 |
+
if debug:
|
85 |
+
print("Found buffers:", buffer_names)
|
86 |
+
|
87 |
+
# recover just the buffers while restoring them to fp32 if they were saved in fp16
|
88 |
+
buffers = {
|
89 |
+
k: v.float()
|
90 |
+
for k,
|
91 |
+
v in state_dict["module"].items() if k in buffer_names
|
92 |
+
}
|
93 |
+
param_shapes = state_dict[PARAM_SHAPES]
|
94 |
+
|
95 |
+
ds_version = state_dict.get(DS_VERSION, None)
|
96 |
+
|
97 |
+
return buffers, param_shapes, ds_version
|
98 |
+
|
99 |
+
|
100 |
+
def parse_optim_states(files, ds_checkpoint_dir):
|
101 |
+
|
102 |
+
total_files = len(files)
|
103 |
+
state_dicts = []
|
104 |
+
for f in files:
|
105 |
+
state_dicts.append(torch.load(f, map_location=device))
|
106 |
+
|
107 |
+
if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]:
|
108 |
+
raise ValueError(f"{files[0]} is not a zero checkpoint")
|
109 |
+
zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
|
110 |
+
world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
|
111 |
+
|
112 |
+
# For ZeRO-2 each param group can have different partition_count as data parallelism for expert
|
113 |
+
# parameters can be different from data parallelism for non-expert parameters. So we can just
|
114 |
+
# use the max of the partition_count to get the dp world_size.
|
115 |
+
|
116 |
+
if type(world_size) is list:
|
117 |
+
world_size = max(world_size)
|
118 |
+
|
119 |
+
if world_size != total_files:
|
120 |
+
raise ValueError(
|
121 |
+
f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
|
122 |
+
"Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
|
123 |
+
)
|
124 |
+
|
125 |
+
# the groups are named differently in each stage
|
126 |
+
if zero_stage == 2:
|
127 |
+
fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
|
128 |
+
elif zero_stage == 3:
|
129 |
+
fp32_groups_key = FP32_FLAT_GROUPS
|
130 |
+
else:
|
131 |
+
raise ValueError(f"unknown zero stage {zero_stage}")
|
132 |
+
|
133 |
+
if zero_stage == 2:
|
134 |
+
fp32_flat_groups = [
|
135 |
+
state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key]
|
136 |
+
for i in range(len(state_dicts))
|
137 |
+
]
|
138 |
+
elif zero_stage == 3:
|
139 |
+
# if there is more than one param group, there will be multiple flattened tensors - one
|
140 |
+
# flattened tensor per group - for simplicity merge them into a single tensor
|
141 |
+
#
|
142 |
+
# XXX: could make the script more memory efficient for when there are multiple groups - it
|
143 |
+
# will require matching the sub-lists of param_shapes for each param group flattened tensor
|
144 |
+
|
145 |
+
fp32_flat_groups = [
|
146 |
+
torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key],
|
147 |
+
0) for i in range(len(state_dicts))
|
148 |
+
]
|
149 |
+
|
150 |
+
return zero_stage, world_size, fp32_flat_groups
|
151 |
+
|
152 |
+
|
153 |
+
def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir):
|
154 |
+
"""
|
155 |
+
Returns fp32 state_dict reconstructed from ds checkpoint
|
156 |
+
|
157 |
+
Args:
|
158 |
+
- ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
|
159 |
+
|
160 |
+
"""
|
161 |
+
print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
|
162 |
+
|
163 |
+
optim_files = get_optim_files(ds_checkpoint_dir)
|
164 |
+
zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
|
165 |
+
print(
|
166 |
+
f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
|
167 |
+
|
168 |
+
model_file = get_model_state_file(ds_checkpoint_dir, zero_stage)
|
169 |
+
buffers, param_shapes, ds_version = parse_model_state(model_file)
|
170 |
+
print(f'Parsing checkpoint created by deepspeed=={ds_version}')
|
171 |
+
|
172 |
+
if zero_stage == 2:
|
173 |
+
return _get_fp32_state_dict_from_zero2_checkpoint(world_size,
|
174 |
+
param_shapes,
|
175 |
+
fp32_flat_groups,
|
176 |
+
buffers)
|
177 |
+
elif zero_stage == 3:
|
178 |
+
return _get_fp32_state_dict_from_zero3_checkpoint(world_size,
|
179 |
+
param_shapes,
|
180 |
+
fp32_flat_groups,
|
181 |
+
buffers)
|
182 |
+
|
183 |
+
|
184 |
+
def _get_fp32_state_dict_from_zero2_checkpoint(world_size,
|
185 |
+
param_shapes,
|
186 |
+
fp32_flat_groups,
|
187 |
+
buffers):
|
188 |
+
|
189 |
+
# Reconstruction protocol:
|
190 |
+
#
|
191 |
+
# XXX: document this
|
192 |
+
|
    if debug:
        for i in range(world_size):
            for j in range(len(fp32_flat_groups[0])):
                print(
                    f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")

    # XXX: memory usage doubles here (zero2)
    num_param_groups = len(fp32_flat_groups[0])
    merged_single_partition_of_fp32_groups = []
    for i in range(num_param_groups):
        merged_partitions = [sd[i] for sd in fp32_flat_groups]
        full_single_fp32_vector = torch.cat(merged_partitions, 0)
        merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
    avail_numel = sum([
        full_single_fp32_vector.numel()
        for full_single_fp32_vector in merged_single_partition_of_fp32_groups
    ])

    if debug:
        wanted_params = sum([len(shapes) for shapes in param_shapes])
        wanted_numel = sum(
            [sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
        # not asserting if there is a mismatch due to possible padding
        print(f"Have {avail_numel} numels to process.")
        print(f"Need {wanted_numel} numels in {wanted_params} params.")

    state_dict = OrderedDict()

    # buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # an out-of-core computing solution
    total_numel = 0
    total_params = 0
    for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
        offset = 0
        avail_numel = full_single_fp32_vector.numel()
        for name, shape in shapes.items():

            unpartitioned_numel = shape.numel()
            total_numel += unpartitioned_numel
            total_params += 1

            if debug:
                print(
                    f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} "
                )
            state_dict[name] = full_single_fp32_vector.narrow(
                0,
                offset,
                unpartitioned_numel).view(shape)
            offset += unpartitioned_numel

        # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
        # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
        # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
        # live optimizer object, so we are checking that the numbers are within the right range
        align_to = 2 * world_size

        def zero2_align(x):
            return align_to * math.ceil(x / align_to)
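        # e.g. (illustrative numbers) with world_size=8, align_to=16: an offset of 1000
        # rounds up to math.ceil(1000 / 16) * 16 = 1008
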
        if debug:
            print(f"original offset={offset}, avail_numel={avail_numel}")

        offset = zero2_align(offset)
        avail_numel = zero2_align(avail_numel)

        if debug:
            print(f"aligned offset={offset}, avail_numel={avail_numel}")

        # Sanity check
        if offset != avail_numel:
            raise ValueError(
                f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(
        f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements"
    )

    return state_dict

def zero3_partitioned_param_info(unpartitioned_numel, world_size):
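    # e.g. (illustrative numbers) unpartitioned_numel=10, world_size=4: each rank stores
    # math.ceil(10 / 4) = 3 elements, and the last partition carries 4 - 10 % 4 = 2
    # elements of padding, so the 4 * 3 = 12 stored elements cover the 10 real ones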
    remainder = unpartitioned_numel % world_size
    padding_numel = (world_size - remainder) if remainder else 0
    partitioned_numel = math.ceil(unpartitioned_numel / world_size)
    return partitioned_numel, padding_numel

def _get_fp32_state_dict_from_zero3_checkpoint(world_size,
                                               param_shapes,
                                               fp32_flat_groups,
                                               buffers):

    # Reconstruction protocol: For zero3 we need to zip the partitions together at the boundary of
    # each param, re-consolidating each param, while dealing with padding if any

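    # every rank's flattened group has the same (padded) length, so the total numel
    # available across ranks is one rank's numel times world_size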
    avail_numel = fp32_flat_groups[0].numel() * world_size
    # merge list of dicts, preserving order
    param_shapes = {k: v for d in param_shapes for k, v in d.items()}

    if debug:
        for i in range(world_size):
            print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")

    wanted_params = len(param_shapes)
    wanted_numel = sum(shape.numel() for shape in param_shapes.values())
    # not asserting if there is a mismatch due to possible padding
    print(f"Have {avail_numel} numels to process.")
    print(f"Need {wanted_numel} numels in {wanted_params} params.")

    state_dict = OrderedDict()

    # buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # an out-of-core computing solution
    offset = 0
    total_numel = 0
    total_params = 0
    for name, shape in param_shapes.items():

        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel
        total_params += 1

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"{total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

        # XXX: memory usage doubles here
        state_dict[name] = torch.cat(
            tuple(fp32_flat_groups[i].narrow(0,
                                             offset,
                                             partitioned_numel)
                  for i in range(world_size)),
            0).narrow(0,
                      0,
                      unpartitioned_numel).view(shape)
        offset += partitioned_numel

    offset *= world_size

    # Sanity check
    if offset != avail_numel:
        raise ValueError(
            f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(
        f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements"
    )

    return state_dict

def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None):
    """
    Convert a ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded
    with ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for
    example via a model hub.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder
        - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``

    Returns:
        - pytorch ``state_dict``

    Note: this approach may not work if your application doesn't have sufficient free CPU memory,
    in which case you may need to use the offline approach via the ``zero_to_fp32.py`` script that
    is saved with the checkpoint.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
        # do the training and checkpoint saving
        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
        model = model.cpu() # move to cpu
        model.load_state_dict(state_dict)
        # submit to model hub or save the model to share with others

    In this example the ``model`` will no longer be usable in the deepspeed context of the same
    application, i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.

    """
    if tag is None:
        latest_path = os.path.join(checkpoint_dir, 'latest')
        if os.path.isfile(latest_path):
            with open(latest_path, 'r') as fd:
                tag = fd.read().strip()
        else:
            raise ValueError(f"Unable to find 'latest' file at {latest_path}")

    ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)

    if not os.path.isdir(ds_checkpoint_dir):
        raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")

    return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir)

def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None):
    """
    Convert a ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can
    be loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without
    DeepSpeed.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder (one that contains the tag-folder, like ``global_step14``)
        - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
        - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
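
    A typical usage might be (paths are illustrative) ::

        from deepspeed.utils.zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict
        convert_zero_checkpoint_to_fp32_state_dict("path/checkpoint-12", "path/checkpoint-12/pytorch_model.bin")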
    """

    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
    print(f"Saving fp32 state dict to {output_file}")
    torch.save(state_dict, output_file)

def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
    """
    1. Put the provided model on the cpu
    2. Convert a ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
    3. Load it into the provided model

    Args:
        - ``model``: the model object to update
        - ``checkpoint_dir``: path to the desired checkpoint folder (one that contains the tag-folder, like ``global_step14``)
        - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``

    Returns:
        - ``model``: the modified model

    Make sure you have plenty of CPU memory available before you call this function. If you don't
    have enough, use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
    conveniently placed for you in the checkpoint folder.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
        model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
        # submit to model hub or save the model to share with others

    Note that once this is run, the ``model`` will no longer be usable in the deepspeed context of
    the same application, i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    """
    logger.info("Extracting fp32 weights")
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)

    logger.info("Overwriting model with fp32 weights")
    model = model.cpu()
    model.load_state_dict(state_dict, strict=False)

    return model

if __name__ == "__main__":

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "checkpoint_dir",
        type=str,
        help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
    parser.add_argument(
        "output_file",
        type=str,
        help=
        "path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)"
    )
    parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
    args = parser.parse_args()

    debug = args.debug

    convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file)
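
# example invocation (illustrative paths):
#
#   python zero_to_fp32.py path/checkpoint-12 path/checkpoint-12/pytorch_model.bin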