Muennighoff committed
Commit c79fa93
1 parent: 27b18ae
This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full list.
Files changed (50)
  1. gpt256.bsh +349 -0
  2. mup-1b-100m-e3-full-gpt2lmmup.json +0 -1
  3. mup-1b-100m-e3-full.json +0 -1
  4. {mup-1b-100m-e3 → mup-1b-100m}/config.json +1 -1
  5. {mup-200m-100m-e3 → mup-1b-100m}/pytorch_model.bin +1 -1
  6. {mup-1b-100m-e3 → mup-1b-100m}/training_args.bin +1 -1
  7. mup-200m-100m-e3-full-gpt2lmmup.json +0 -1
  8. mup-200m-100m-e3-full.json +0 -1
  9. mup-200m-100m-e3/config.json +0 -34
  10. {mup-3b5-100m-e3 → mup-200m-100m/checkpoint-100}/config.json +3 -3
  11. mup-200m-100m/checkpoint-100/optimizer.pt +3 -0
  12. mup-200m-100m/checkpoint-100/pytorch_model.bin +3 -0
  13. {mup-2b-100m-e3/checkpoint-200 → mup-200m-100m/checkpoint-100}/rng_state.pth +1 -1
  14. {mup-2b5-100m-e3 → mup-200m-100m}/checkpoint-100/scaler.pt +0 -0
  15. {mup-2b-100m-e3/checkpoint-200 → mup-200m-100m/checkpoint-100}/scheduler.pt +1 -1
  16. {mup-2b-100m-e3 → mup-200m-100m}/checkpoint-100/trainer_state.json +2 -2
  17. {mup-2b-100m-e3 → mup-200m-100m}/checkpoint-100/training_args.bin +1 -1
  18. {mup-2b-100m-e3 → mup-200m-100m}/checkpoint-200/config.json +4 -4
  19. mup-200m-100m/checkpoint-200/optimizer.pt +3 -0
  20. mup-200m-100m/checkpoint-200/pytorch_model.bin +3 -0
  21. {mup-2b-100m-e3/checkpoint-100 → mup-200m-100m/checkpoint-200}/rng_state.pth +1 -1
  22. mup-200m-100m/checkpoint-200/scaler.pt +3 -0
  23. {mup-2b-100m-e3/checkpoint-300 → mup-200m-100m/checkpoint-200}/scheduler.pt +1 -1
  24. {mup-2b-100m-e3 → mup-200m-100m}/checkpoint-200/trainer_state.json +2 -2
  25. {mup-200m-100m-e3 → mup-200m-100m/checkpoint-200}/training_args.bin +1 -1
  26. mup-200m-100m/checkpoint-300/config.json +34 -0
  27. mup-200m-100m/checkpoint-300/optimizer.pt +3 -0
  28. mup-200m-100m/checkpoint-300/pytorch_model.bin +3 -0
  29. {mup-3b-100m-e3/checkpoint-100 → mup-200m-100m/checkpoint-300}/rng_state.pth +1 -1
  30. mup-200m-100m/checkpoint-300/scaler.pt +3 -0
  31. {mup-3b-100m-e3/checkpoint-100 → mup-200m-100m/checkpoint-300}/scheduler.pt +1 -1
  32. {mup-3b-100m-e3/checkpoint-100 → mup-200m-100m/checkpoint-300}/trainer_state.json +4 -4
  33. {mup-2b-100m-e3/checkpoint-200 → mup-200m-100m/checkpoint-300}/training_args.bin +1 -1
  34. mup-200m-100m/config.json +34 -0
  35. mup-200m-100m/pytorch_model.bin +3 -0
  36. mup-200m-100m/training_args.bin +3 -0
  37. mup-2b-100m-e3-full-gpt2lmmup.json +0 -1
  38. mup-2b-100m-e3-full.json +0 -1
  39. mup-2b-100m-e3/checkpoint-200/optimizer.pt +0 -3
  40. mup-2b-100m-e3/checkpoint-300/optimizer.pt +0 -3
  41. mup-2b-100m-e3/checkpoint-300/training_args.bin +0 -3
  42. mup-2b-100m-e3/pytorch_model.bin +0 -3
  43. mup-2b-100m-e3/training_args.bin +0 -3
  44. {mup-2b-100m-e3/checkpoint-300 → mup-2b-100m}/config.json +1 -1
  45. {mup-2b-100m-e3/checkpoint-100 → mup-2b-100m}/pytorch_model.bin +1 -1
  46. mup-2b-100m/training_args.bin +3 -0
  47. mup-2b5-100m-e3-full-gpt2lmmup.json +0 -1
  48. mup-2b5-100m-e3-full.json +0 -1
  49. {mup-2b5-100m-e3 → mup-2b5-100m}/checkpoint-100/config.json +0 -0
  50. {mup-2b5-100m-e3 → mup-2b5-100m}/checkpoint-100/optimizer.pt +0 -0
gpt256.bsh ADDED
@@ -0,0 +1,349 @@
+ # This is a base shape file encoded in yaml
+ # - `null` indicates a dimension is "finite", i.e. a non-"width" dimension
+ # - a number indicates the base dimension of an "infinite" dimension, i.e. some notion of "width"
+ transformer.h.0.attn.c_attn.bias:
+ - 384
+ transformer.h.0.attn.c_attn.weight:
+ - 128
+ - 384
+ transformer.h.0.attn.c_proj.bias:
+ - 128
+ transformer.h.0.attn.c_proj.weight:
+ - 128
+ - 128
+ transformer.h.0.ln_1.bias:
+ - 128
+ transformer.h.0.ln_1.weight:
+ - 128
+ transformer.h.0.ln_2.bias:
+ - 128
+ transformer.h.0.ln_2.weight:
+ - 128
+ transformer.h.0.mlp.c_fc.bias:
+ - 512
+ transformer.h.0.mlp.c_fc.weight:
+ - 128
+ - 512
+ transformer.h.0.mlp.c_proj.bias:
+ - 128
+ transformer.h.0.mlp.c_proj.weight:
+ - 512
+ - 128
+ transformer.h.1.attn.c_attn.bias:
+ - 384
+ transformer.h.1.attn.c_attn.weight:
+ - 128
+ - 384
+ transformer.h.1.attn.c_proj.bias:
+ - 128
+ transformer.h.1.attn.c_proj.weight:
+ - 128
+ - 128
+ transformer.h.1.ln_1.bias:
+ - 128
+ transformer.h.1.ln_1.weight:
+ - 128
+ transformer.h.1.ln_2.bias:
+ - 128
+ transformer.h.1.ln_2.weight:
+ - 128
+ transformer.h.1.mlp.c_fc.bias:
+ - 512
+ transformer.h.1.mlp.c_fc.weight:
+ - 128
+ - 512
+ transformer.h.1.mlp.c_proj.bias:
+ - 128
+ transformer.h.1.mlp.c_proj.weight:
+ - 512
+ - 128
+ transformer.h.10.attn.c_attn.bias:
+ - 384
+ transformer.h.10.attn.c_attn.weight:
+ - 128
+ - 384
+ transformer.h.10.attn.c_proj.bias:
+ - 128
+ transformer.h.10.attn.c_proj.weight:
+ - 128
+ - 128
+ transformer.h.10.ln_1.bias:
+ - 128
+ transformer.h.10.ln_1.weight:
+ - 128
+ transformer.h.10.ln_2.bias:
+ - 128
+ transformer.h.10.ln_2.weight:
+ - 128
+ transformer.h.10.mlp.c_fc.bias:
+ - 512
+ transformer.h.10.mlp.c_fc.weight:
+ - 128
+ - 512
+ transformer.h.10.mlp.c_proj.bias:
+ - 128
+ transformer.h.10.mlp.c_proj.weight:
+ - 512
+ - 128
+ transformer.h.11.attn.c_attn.bias:
+ - 384
+ transformer.h.11.attn.c_attn.weight:
+ - 128
+ - 384
+ transformer.h.11.attn.c_proj.bias:
+ - 128
+ transformer.h.11.attn.c_proj.weight:
+ - 128
+ - 128
+ transformer.h.11.ln_1.bias:
+ - 128
+ transformer.h.11.ln_1.weight:
+ - 128
+ transformer.h.11.ln_2.bias:
+ - 128
+ transformer.h.11.ln_2.weight:
+ - 128
+ transformer.h.11.mlp.c_fc.bias:
+ - 512
+ transformer.h.11.mlp.c_fc.weight:
+ - 128
+ - 512
+ transformer.h.11.mlp.c_proj.bias:
+ - 128
+ transformer.h.11.mlp.c_proj.weight:
+ - 512
+ - 128
+ transformer.h.2.attn.c_attn.bias:
+ - 384
+ transformer.h.2.attn.c_attn.weight:
+ - 128
+ - 384
+ transformer.h.2.attn.c_proj.bias:
+ - 128
+ transformer.h.2.attn.c_proj.weight:
+ - 128
+ - 128
+ transformer.h.2.ln_1.bias:
+ - 128
+ transformer.h.2.ln_1.weight:
+ - 128
+ transformer.h.2.ln_2.bias:
+ - 128
+ transformer.h.2.ln_2.weight:
+ - 128
+ transformer.h.2.mlp.c_fc.bias:
+ - 512
+ transformer.h.2.mlp.c_fc.weight:
+ - 128
+ - 512
+ transformer.h.2.mlp.c_proj.bias:
+ - 128
+ transformer.h.2.mlp.c_proj.weight:
+ - 512
+ - 128
+ transformer.h.3.attn.c_attn.bias:
+ - 384
+ transformer.h.3.attn.c_attn.weight:
+ - 128
+ - 384
+ transformer.h.3.attn.c_proj.bias:
+ - 128
+ transformer.h.3.attn.c_proj.weight:
+ - 128
+ - 128
+ transformer.h.3.ln_1.bias:
+ - 128
+ transformer.h.3.ln_1.weight:
+ - 128
+ transformer.h.3.ln_2.bias:
+ - 128
+ transformer.h.3.ln_2.weight:
+ - 128
+ transformer.h.3.mlp.c_fc.bias:
+ - 512
+ transformer.h.3.mlp.c_fc.weight:
+ - 128
+ - 512
+ transformer.h.3.mlp.c_proj.bias:
+ - 128
+ transformer.h.3.mlp.c_proj.weight:
+ - 512
+ - 128
+ transformer.h.4.attn.c_attn.bias:
+ - 384
+ transformer.h.4.attn.c_attn.weight:
+ - 128
+ - 384
+ transformer.h.4.attn.c_proj.bias:
+ - 128
+ transformer.h.4.attn.c_proj.weight:
+ - 128
+ - 128
+ transformer.h.4.ln_1.bias:
+ - 128
+ transformer.h.4.ln_1.weight:
+ - 128
+ transformer.h.4.ln_2.bias:
+ - 128
+ transformer.h.4.ln_2.weight:
+ - 128
+ transformer.h.4.mlp.c_fc.bias:
+ - 512
+ transformer.h.4.mlp.c_fc.weight:
+ - 128
+ - 512
+ transformer.h.4.mlp.c_proj.bias:
+ - 128
+ transformer.h.4.mlp.c_proj.weight:
+ - 512
+ - 128
+ transformer.h.5.attn.c_attn.bias:
+ - 384
+ transformer.h.5.attn.c_attn.weight:
+ - 128
+ - 384
+ transformer.h.5.attn.c_proj.bias:
+ - 128
+ transformer.h.5.attn.c_proj.weight:
+ - 128
+ - 128
+ transformer.h.5.ln_1.bias:
+ - 128
+ transformer.h.5.ln_1.weight:
+ - 128
+ transformer.h.5.ln_2.bias:
+ - 128
+ transformer.h.5.ln_2.weight:
+ - 128
+ transformer.h.5.mlp.c_fc.bias:
+ - 512
+ transformer.h.5.mlp.c_fc.weight:
+ - 128
+ - 512
+ transformer.h.5.mlp.c_proj.bias:
+ - 128
+ transformer.h.5.mlp.c_proj.weight:
+ - 512
+ - 128
+ transformer.h.6.attn.c_attn.bias:
+ - 384
+ transformer.h.6.attn.c_attn.weight:
+ - 128
+ - 384
+ transformer.h.6.attn.c_proj.bias:
+ - 128
+ transformer.h.6.attn.c_proj.weight:
+ - 128
+ - 128
+ transformer.h.6.ln_1.bias:
+ - 128
+ transformer.h.6.ln_1.weight:
+ - 128
+ transformer.h.6.ln_2.bias:
+ - 128
+ transformer.h.6.ln_2.weight:
+ - 128
+ transformer.h.6.mlp.c_fc.bias:
+ - 512
+ transformer.h.6.mlp.c_fc.weight:
+ - 128
+ - 512
+ transformer.h.6.mlp.c_proj.bias:
+ - 128
+ transformer.h.6.mlp.c_proj.weight:
+ - 512
+ - 128
+ transformer.h.7.attn.c_attn.bias:
+ - 384
+ transformer.h.7.attn.c_attn.weight:
+ - 128
+ - 384
+ transformer.h.7.attn.c_proj.bias:
+ - 128
+ transformer.h.7.attn.c_proj.weight:
+ - 128
+ - 128
+ transformer.h.7.ln_1.bias:
+ - 128
+ transformer.h.7.ln_1.weight:
+ - 128
+ transformer.h.7.ln_2.bias:
+ - 128
+ transformer.h.7.ln_2.weight:
+ - 128
+ transformer.h.7.mlp.c_fc.bias:
+ - 512
+ transformer.h.7.mlp.c_fc.weight:
+ - 128
+ - 512
+ transformer.h.7.mlp.c_proj.bias:
+ - 128
+ transformer.h.7.mlp.c_proj.weight:
+ - 512
+ - 128
+ transformer.h.8.attn.c_attn.bias:
+ - 384
+ transformer.h.8.attn.c_attn.weight:
+ - 128
+ - 384
+ transformer.h.8.attn.c_proj.bias:
+ - 128
+ transformer.h.8.attn.c_proj.weight:
+ - 128
+ - 128
+ transformer.h.8.ln_1.bias:
+ - 128
+ transformer.h.8.ln_1.weight:
+ - 128
+ transformer.h.8.ln_2.bias:
+ - 128
+ transformer.h.8.ln_2.weight:
+ - 128
+ transformer.h.8.mlp.c_fc.bias:
+ - 512
+ transformer.h.8.mlp.c_fc.weight:
+ - 128
+ - 512
+ transformer.h.8.mlp.c_proj.bias:
+ - 128
+ transformer.h.8.mlp.c_proj.weight:
+ - 512
+ - 128
+ transformer.h.9.attn.c_attn.bias:
+ - 384
+ transformer.h.9.attn.c_attn.weight:
+ - 128
+ - 384
+ transformer.h.9.attn.c_proj.bias:
+ - 128
+ transformer.h.9.attn.c_proj.weight:
+ - 128
+ - 128
+ transformer.h.9.ln_1.bias:
+ - 128
+ transformer.h.9.ln_1.weight:
+ - 128
+ transformer.h.9.ln_2.bias:
+ - 128
+ transformer.h.9.ln_2.weight:
+ - 128
+ transformer.h.9.mlp.c_fc.bias:
+ - 512
+ transformer.h.9.mlp.c_fc.weight:
+ - 128
+ - 512
+ transformer.h.9.mlp.c_proj.bias:
+ - 128
+ transformer.h.9.mlp.c_proj.weight:
+ - 512
+ - 128
+ transformer.ln_f.bias:
+ - 128
+ transformer.ln_f.weight:
+ - 128
+ transformer.wpe.weight:
+ - null
+ - 128
+ transformer.wte.weight:
+ - null
+ - 128
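gpt256.bsh is a muP base-shape file: for every parameter it records which dimensions scale with model width (stored as the base width, e.g. 128) and which are width-independent (stored as null). A minimal sketch of how such a file is typically written and read with the `mup` package (github.com/microsoft/mup); the widths and the use of the stock GPT2LMHeadModel below are illustrative assumptions, not taken from this commit:

```python
# Hedged sketch: producing and consuming a .bsh base-shape file with mup.
from transformers import GPT2Config, GPT2LMHeadModel
from mup import make_base_shapes, set_base_shapes

# Two models that differ only in width; mup compares their parameter shapes
# to decide which dimensions are "infinite" (width) and which are finite.
base = GPT2LMHeadModel(GPT2Config(n_embd=128, n_head=4, n_layer=12))
delta = GPT2LMHeadModel(GPT2Config(n_embd=256, n_head=8, n_layer=12))
make_base_shapes(base, delta, savefile="gpt256.bsh")

# At training time the target-width model is tied back to the base shapes,
# enabling muP initialization and per-parameter learning-rate scaling.
target = GPT2LMHeadModel(GPT2Config(n_embd=1024, n_head=32, n_layer=12))
set_base_shapes(target, "gpt256.bsh")
```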
mup-1b-100m-e3-full-gpt2lmmup.json DELETED
@@ -1 +0,0 @@
- {"eval_loss": 10.503863334655762, "eval_runtime": 35656.762, "eval_samples_per_second": 4.751, "eval_steps_per_second": 0.148, "epoch": 0.01}
mup-1b-100m-e3-full.json DELETED
@@ -1 +0,0 @@
- {"eval_loss": 10.503863334655762, "eval_runtime": 35656.762, "eval_samples_per_second": 4.751, "eval_steps_per_second": 0.148, "epoch": 0.01}
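The two deleted eval files above carry identical records. As a rough cross-check (both numbers are rounded, so this is approximate), runtime × throughput recovers the evaluation set size:

```python
# Approximate number of eval samples implied by the deleted eval record.
eval_runtime = 35656.762          # seconds, from the deleted JSON
eval_samples_per_second = 4.751   # from the deleted JSON
print(round(eval_runtime * eval_samples_per_second))  # ~169405 samples
```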
{mup-1b-100m-e3 → mup-1b-100m}/config.json RENAMED
@@ -29,6 +29,6 @@
  "summary_use_proj": true,
  "torch_dtype": "float32",
  "transformers_version": "4.25.1",
- "use_cache": true,
+ "use_cache": false,
  "vocab_size": 50257
  }
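Several configs in this commit flip use_cache from true to false. The commit does not say why; a common reason (an assumption here, not stated in the diff) is that HF gradient checkpointing requires the KV cache off during training. It can still be re-enabled per call at generation time:

```python
from transformers import GPT2Config

# Run from a checkout of this repo; "mup-1b-100m" is the renamed folder above.
cfg = GPT2Config.from_pretrained("mup-1b-100m")
assert cfg.use_cache is False
# model.generate(..., use_cache=True) re-enables the KV cache for inference.
```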
{mup-200m-100m-e3 → mup-1b-100m}/pytorch_model.bin RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:600250335c93538740793cc7e8a4682defb6f7e202a3641016cf4ee1754bbe75
+ oid sha256:db917f7fa18c7c062716f3b195c484ed752da15765806f35f76b3e7f24d4c40a
  size 6080539805
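The *.bin and *.pt entries in this commit are Git LFS pointer files rather than the binaries themselves; each stores a sha256 oid and a byte size. A minimal sketch of reading one (the helper is hypothetical; the key-value format follows the spec URL in the pointers):

```python
# Hypothetical helper: parse a Git LFS pointer file (key-value lines,
# per https://git-lfs.github.com/spec/v1).
def read_lfs_pointer(path):
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

ptr = read_lfs_pointer("mup-1b-100m/pytorch_model.bin")
print(ptr["oid"])   # sha256:db917f7fa18c...
print(ptr["size"])  # 6080539805 -> ~6.1 GB of float32 weights
```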
{mup-1b-100m-e3 → mup-1b-100m}/training_args.bin RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:010e79df4f1f8fc111c9685a914acb53060ce3dcaf804d961ae7df3e2ee72c14
+ oid sha256:1282be713f72d5c7a8476389a830fe32aa9d2ce295fc83219bc57e7b5ecf2e01
  size 3387
mup-200m-100m-e3-full-gpt2lmmup.json DELETED
@@ -1 +0,0 @@
- {"eval_loss": 10.863893508911133, "eval_runtime": 35678.3953, "eval_samples_per_second": 4.748, "eval_steps_per_second": 0.148}
mup-200m-100m-e3-full.json DELETED
@@ -1 +0,0 @@
- {"eval_loss": 10.863893508911133, "eval_runtime": 35678.3953, "eval_samples_per_second": 4.748, "eval_steps_per_second": 0.148}
mup-200m-100m-e3/config.json DELETED
@@ -1,34 +0,0 @@
- {
- "activation_function": "gelu_new",
- "architectures": [
- "GPT2LMHeadModel"
- ],
- "attn_mult": 8.0,
- "attn_pdrop": 0.1,
- "bos_token_id": 50256,
- "embd_pdrop": 0.1,
- "eos_token_id": 50256,
- "initializer_range": 0.1,
- "intermediate_size": 12288,
- "layer_norm_epsilon": 1e-05,
- "model_type": "gpt2",
- "n_embd": 3072,
- "n_head": 64,
- "n_inner": null,
- "n_layer": 12,
- "n_positions": 1024,
- "num_layers": 12,
- "reorder_and_upcast_attn": false,
- "resid_pdrop": 0.1,
- "scale_attn_by_inverse_layer_idx": false,
- "scale_attn_weights": true,
- "summary_activation": null,
- "summary_first_dropout": 0.1,
- "summary_proj_to_labels": true,
- "summary_type": "cls_index",
- "summary_use_proj": true,
- "torch_dtype": "float32",
- "transformers_version": "4.25.1",
- "use_cache": true,
- "vocab_size": 50257
- }
{mup-3b5-100m-e3 → mup-200m-100m/checkpoint-100}/config.json RENAMED
@@ -9,11 +9,11 @@
  "embd_pdrop": 0.1,
  "eos_token_id": 50256,
  "initializer_range": 0.01,
- "intermediate_size": 18432,
+ "intermediate_size": 4096,
  "layer_norm_epsilon": 1e-05,
  "model_type": "gpt2",
- "n_embd": 4608,
- "n_head": 128,
+ "n_embd": 1024,
+ "n_head": 32,
  "n_inner": null,
  "n_layer": 12,
  "n_positions": 1024,
mup-200m-100m/checkpoint-100/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:594bf76f545f0586abbd020a129730f10f1e43891d81f7de5e3728d84d1f283f
+ size 1629471237
mup-200m-100m/checkpoint-100/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3184aafc0c2d875a0eb5c978a5ed9ec2339f706fe9bd9a11bcc9bcd46ef6af66
+ size 827312701
{mup-2b-100m-e3/checkpoint-200 → mup-200m-100m/checkpoint-100}/rng_state.pth RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:850a75d03b68709ebdb3b0429ccf1c2bfbc264ba389a899cc590928049b771ed
+ oid sha256:66ebba15e85e086af7736fdcf05e9cd682b1a2e05cbcf39415aaa86a3b656a0f
  size 14575
{mup-2b5-100m-e3 → mup-200m-100m}/checkpoint-100/scaler.pt RENAMED
File without changes
{mup-2b-100m-e3/checkpoint-200 → mup-200m-100m/checkpoint-100}/scheduler.pt RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7ff11a668fd015bf8fec7ef706376a383794600ae7f4f86f3b9aeffe27fd39be
+ oid sha256:ac45f3b902e0fb39fb1b613f5d2434b6265aef8e4b3589db2a24c5a1ee39c2d4
  size 691
{mup-2b-100m-e3 → mup-200m-100m}/checkpoint-100/trainer_state.json RENAMED
@@ -1,7 +1,7 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.2631010986526346,
+ "epoch": 0.2630929869275672,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
@@ -9,7 +9,7 @@
  "log_history": [],
  "max_steps": 380,
  "num_train_epochs": 1,
- "total_flos": 2.50936282841088e+17,
+ "total_flos": 2.37748988411904e+16,
  "trial_name": null,
  "trial_params": null
  }
{mup-2b-100m-e3 → mup-200m-100m}/checkpoint-100/training_args.bin RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a529bfebc81b3f76599a2706a7dedaa2e159f90434d17d7113ba289ad90ba128
+ oid sha256:eed287ec22ab538923d37b36905111de89f29c3d9044950da371af0f29af7ad2
  size 3387
{mup-2b-100m-e3 → mup-200m-100m}/checkpoint-200/config.json RENAMED
@@ -9,11 +9,11 @@
  "embd_pdrop": 0.1,
  "eos_token_id": 50256,
  "initializer_range": 0.01,
- "intermediate_size": 13312,
+ "intermediate_size": 4096,
  "layer_norm_epsilon": 1e-05,
  "model_type": "gpt2",
- "n_embd": 3328,
- "n_head": 128,
+ "n_embd": 1024,
+ "n_head": 32,
  "n_inner": null,
  "n_layer": 12,
  "n_positions": 1024,
@@ -29,6 +29,6 @@
  "summary_use_proj": true,
  "torch_dtype": "float32",
  "transformers_version": "4.25.1",
- "use_cache": true,
+ "use_cache": false,
  "vocab_size": 50257
  }
mup-200m-100m/checkpoint-200/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:22d39fe236529d8461887b25044dc49fb78fcf91a9c8d246a7e4b35ee6dc880d
+ size 1629471237
mup-200m-100m/checkpoint-200/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a932c129b0ecd3b2869b4cb68a7fd8c0325d26175279038e1778b6558ed5c40d
+ size 827312701
{mup-2b-100m-e3/checkpoint-100 → mup-200m-100m/checkpoint-200}/rng_state.pth RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4c85e1a7fcee9f0a4e6f8ebf1dc4a63a8c96bd1dd0a67ff0ff15c506aae95304
+ oid sha256:4851f3b302b7c4fee9d9420148364c058cf57804100694ae01ab586d33879732
  size 14575
mup-200m-100m/checkpoint-200/scaler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4fccf0f9be1bb8f24861e4393745b3e09cc2687125a69e3757955fb0f0925ea5
+ size 557
{mup-2b-100m-e3/checkpoint-300 → mup-200m-100m/checkpoint-200}/scheduler.pt RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:30fd152d86dafeffe2455270e22218f0b6cab42a3c9b31a01b2bc6d8ceadc7c4
+ oid sha256:88c8941f76ca00d1a3b4387d2503fd21325c149caf70f004ea30b3d20e440522
  size 691
{mup-2b-100m-e3 → mup-200m-100m}/checkpoint-200/trainer_state.json RENAMED
@@ -1,7 +1,7 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.5262021973052692,
+ "epoch": 0.5261859738551344,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
@@ -9,7 +9,7 @@
  "log_history": [],
  "max_steps": 380,
  "num_train_epochs": 1,
- "total_flos": 5.01872565682176e+17,
+ "total_flos": 4.75497976823808e+16,
  "trial_name": null,
  "trial_params": null
  }
{mup-200m-100m-e3 → mup-200m-100m/checkpoint-200}/training_args.bin RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8a9b9d007760086d2e55ee15759f906d50d344e948eb0bca87f0e434c9a4dd63
+ oid sha256:eed287ec22ab538923d37b36905111de89f29c3d9044950da371af0f29af7ad2
  size 3387
mup-200m-100m/checkpoint-300/config.json ADDED
@@ -0,0 +1,34 @@
+ {
+ "activation_function": "gelu_new",
+ "architectures": [
+ "GPT2LMHeadModel"
+ ],
+ "attn_mult": 8.0,
+ "attn_pdrop": 0.1,
+ "bos_token_id": 50256,
+ "embd_pdrop": 0.1,
+ "eos_token_id": 50256,
+ "initializer_range": 0.01,
+ "intermediate_size": 4096,
+ "layer_norm_epsilon": 1e-05,
+ "model_type": "gpt2",
+ "n_embd": 1024,
+ "n_head": 32,
+ "n_inner": null,
+ "n_layer": 12,
+ "n_positions": 1024,
+ "num_layers": 12,
+ "reorder_and_upcast_attn": false,
+ "resid_pdrop": 0.1,
+ "scale_attn_by_inverse_layer_idx": false,
+ "scale_attn_weights": true,
+ "summary_activation": null,
+ "summary_first_dropout": 0.1,
+ "summary_proj_to_labels": true,
+ "summary_type": "cls_index",
+ "summary_use_proj": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.25.1",
+ "use_cache": false,
+ "vocab_size": 50257
+ }
mup-200m-100m/checkpoint-300/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:354296c51c24eca0644e80a3ac24b80b59e558aa504dffd9a191e8d3d0e03053
+ size 1629471237
mup-200m-100m/checkpoint-300/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:87a43819750fecf47f4789e43fc5196dc2175cffef8785d0d916b9f439adad0a
+ size 827312701
{mup-3b-100m-e3/checkpoint-100 → mup-200m-100m/checkpoint-300}/rng_state.pth RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:bccf3cda0d933f5f7bb7859163a2e83eefeef8218429908f35fc718e326740df
+ oid sha256:08950dc609e5df5b063c350f814f906ffba4fb0870677e17789bafc766cec4b7
  size 14575
mup-200m-100m/checkpoint-300/scaler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:efafd90182e3d39d1b7c4a686f86e5913f5abc094dc3e2f827a6d479c6cef247
+ size 557
{mup-3b-100m-e3/checkpoint-100 → mup-200m-100m/checkpoint-300}/scheduler.pt RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:96cd9e71e6cb99c3e129d0a33d4fa0d5f079e1ff04df09a4e8e7d88ca6a01a02
+ oid sha256:f22762c604e0f6d64b39e692dc2d5b773eb950be2aea906ecde2da50f125b1ad
  size 691
{mup-3b-100m-e3/checkpoint-100 → mup-200m-100m/checkpoint-300}/trainer_state.json RENAMED
@@ -1,15 +1,15 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.5262021973052692,
- "global_step": 100,
+ "epoch": 0.7892789607827017,
+ "global_step": 300,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [],
- "max_steps": 190,
+ "max_steps": 380,
  "num_train_epochs": 1,
- "total_flos": 8.581652284440576e+17,
+ "total_flos": 7.13246965235712e+16,
  "trial_name": null,
  "trial_params": null
  }
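Across the three renamed mup-200m-100m checkpoints, total_flos grows exactly linearly in global_step, a quick consistency check on the trainer states (values copied from the diffs above):

```python
# total_flos should scale linearly with global_step at fixed batch/sequence size.
flos = {100: 2.37748988411904e+16,
        200: 4.75497976823808e+16,
        300: 7.13246965235712e+16}
per_step = flos[100] / 100
assert all(abs(v - per_step * s) < 1e6 for s, v in flos.items())
print(f"~{per_step:.3e} FLOs per step")  # ~2.377e+14
```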
{mup-2b-100m-e3/checkpoint-200 → mup-200m-100m/checkpoint-300}/training_args.bin RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a529bfebc81b3f76599a2706a7dedaa2e159f90434d17d7113ba289ad90ba128
+ oid sha256:eed287ec22ab538923d37b36905111de89f29c3d9044950da371af0f29af7ad2
  size 3387
mup-200m-100m/config.json ADDED
@@ -0,0 +1,34 @@
+ {
+ "activation_function": "gelu_new",
+ "architectures": [
+ "GPT2LMHeadModel"
+ ],
+ "attn_mult": 8.0,
+ "attn_pdrop": 0.1,
+ "bos_token_id": 50256,
+ "embd_pdrop": 0.1,
+ "eos_token_id": 50256,
+ "initializer_range": 0.01,
+ "intermediate_size": 4096,
+ "layer_norm_epsilon": 1e-05,
+ "model_type": "gpt2",
+ "n_embd": 1024,
+ "n_head": 32,
+ "n_inner": null,
+ "n_layer": 12,
+ "n_positions": 1024,
+ "num_layers": 12,
+ "reorder_and_upcast_attn": false,
+ "resid_pdrop": 0.1,
+ "scale_attn_by_inverse_layer_idx": false,
+ "scale_attn_weights": true,
+ "summary_activation": null,
+ "summary_first_dropout": 0.1,
+ "summary_proj_to_labels": true,
+ "summary_type": "cls_index",
+ "summary_use_proj": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.25.1",
+ "use_cache": false,
+ "vocab_size": 50257
+ }
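As a sanity check, the config above matches the "200m" in the directory name. A back-of-the-envelope estimate assuming the standard GPT-2 parameter layout (biases and LayerNorm parameters ignored):

```python
# Rough GPT-2 parameter count from the config above.
n_embd, n_layer, n_positions, vocab = 1024, 12, 1024, 50257

embeddings = (vocab + n_positions) * n_embd   # wte + wpe
attn = 4 * n_embd ** 2                        # qkv (3*n_embd) + output projection
mlp = 2 * n_embd * 4096                       # c_fc + c_proj with intermediate_size=4096
total = embeddings + n_layer * (attn + mlp)

print(f"~{total / 1e6:.0f}M parameters")      # ~204M, i.e. the 200m scale
# Consistent with pytorch_model.bin below: 827312701 bytes / 4 ≈ 207M float32 params.
```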
mup-200m-100m/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fce93a7c72644099928e5c71d3c690ab11190cce10bb69a429817748890e896d
+ size 827312701
mup-200m-100m/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eed287ec22ab538923d37b36905111de89f29c3d9044950da371af0f29af7ad2
+ size 3387
mup-2b-100m-e3-full-gpt2lmmup.json DELETED
@@ -1 +0,0 @@
- {"eval_loss": 9.270681381225586, "eval_runtime": 46206.7705, "eval_samples_per_second": 3.666, "eval_steps_per_second": 0.229}
mup-2b-100m-e3-full.json DELETED
@@ -1 +0,0 @@
- {"eval_loss": 9.270681381225586, "eval_runtime": 46206.7705, "eval_samples_per_second": 3.666, "eval_steps_per_second": 0.229}
mup-2b-100m-e3/checkpoint-200/optimizer.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:fc8643fa7f495fb7235042b7a64c424947f35a3834c2e7ecaf81a5faff11ddb3
- size 14128712873
mup-2b-100m-e3/checkpoint-300/optimizer.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:83e214ea9a2859b68902931336a551a4a675fe79c227fa5b6bf330b7d270514b
- size 14128712873
mup-2b-100m-e3/checkpoint-300/training_args.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:a529bfebc81b3f76599a2706a7dedaa2e159f90434d17d7113ba289ad90ba128
- size 3387
mup-2b-100m-e3/pytorch_model.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:9c72ffdc020dcc3f482979a1b900d14d4832f0f3f07845f1feaba439e3d93b68
- size 7076932005
mup-2b-100m-e3/training_args.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:c0360c4e8f458bf1a55bfbc7c49d3cd1b51ecf6c15f5985ea0900cb0d4d27bfb
- size 3387
{mup-2b-100m-e3/checkpoint-300 → mup-2b-100m}/config.json RENAMED
@@ -29,6 +29,6 @@
  "summary_use_proj": true,
  "torch_dtype": "float32",
  "transformers_version": "4.25.1",
- "use_cache": true,
+ "use_cache": false,
  "vocab_size": 50257
  }
{mup-2b-100m-e3/checkpoint-100 → mup-2b-100m}/pytorch_model.bin RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9aff050f6759cb9c9bd9fb82d5908dc746bd5fa9f0754b969286830370959df2
+ oid sha256:78adb59edc6f9e1259d95cfe9c9c4ff789abf403abcb72cd40a294b078e51653
  size 7076932005
mup-2b-100m/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:97eee37826c3cd5dd0f1f8ec1f9fdde2d51145e25bafb34ffd1a872005f925d6
+ size 3387
mup-2b5-100m-e3-full-gpt2lmmup.json DELETED
@@ -1 +0,0 @@
- {"eval_loss": 6.969681739807129, "eval_runtime": 14651.8437, "eval_samples_per_second": 11.563, "eval_steps_per_second": 0.723, "epoch": 1.0}
mup-2b5-100m-e3-full.json DELETED
@@ -1 +0,0 @@
- {"eval_loss": 6.969681739807129, "eval_runtime": 14651.8437, "eval_samples_per_second": 11.563, "eval_steps_per_second": 0.723, "epoch": 1.0}
{mup-2b5-100m-e3 → mup-2b5-100m}/checkpoint-100/config.json RENAMED
File without changes
{mup-2b5-100m-e3 → mup-2b5-100m}/checkpoint-100/optimizer.pt RENAMED
File without changes