{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 216,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "grad_norm": 8.33483565345429,
      "learning_rate": 2.2727272727272725e-08,
      "logits/chosen": -0.9294053316116333,
      "logits/rejected": -1.2306766510009766,
      "logps/chosen": -79.32324981689453,
      "logps/rejected": -124.87494659423828,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.05,
      "grad_norm": 8.957121309516765,
      "learning_rate": 2.2727272727272726e-07,
      "logits/chosen": -0.5779850482940674,
      "logits/rejected": -1.1789488792419434,
      "logps/chosen": -83.05654907226562,
      "logps/rejected": -135.26220703125,
      "loss": 0.6932,
      "rewards/accuracies": 0.4930555522441864,
      "rewards/chosen": 0.0003059397859033197,
      "rewards/margins": 0.00026924433768726885,
      "rewards/rejected": 3.6695546441478655e-05,
      "step": 10
    },
    {
      "epoch": 0.09,
      "grad_norm": 8.37389940695367,
      "learning_rate": 4.545454545454545e-07,
      "logits/chosen": -0.621688723564148,
      "logits/rejected": -1.1858972311019897,
      "logps/chosen": -95.2262954711914,
      "logps/rejected": -143.64205932617188,
      "loss": 0.6931,
      "rewards/accuracies": 0.48124998807907104,
      "rewards/chosen": -0.0002137714036507532,
      "rewards/margins": -0.0006433525122702122,
      "rewards/rejected": 0.00042958109406754375,
      "step": 20
    },
    {
      "epoch": 0.14,
      "grad_norm": 8.286301306160889,
      "learning_rate": 4.979050253066063e-07,
      "logits/chosen": -0.6890290975570679,
      "logits/rejected": -1.1448113918304443,
      "logps/chosen": -88.85906982421875,
      "logps/rejected": -132.80384826660156,
      "loss": 0.6917,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": 0.0019540651701390743,
      "rewards/margins": 0.00375129166059196,
      "rewards/rejected": -0.0017972266068682075,
      "step": 30
    },
    {
      "epoch": 0.19,
      "grad_norm": 8.434509757435025,
      "learning_rate": 4.894543310469967e-07,
      "logits/chosen": -0.5220860242843628,
      "logits/rejected": -1.1176507472991943,
      "logps/chosen": -91.24484252929688,
      "logps/rejected": -126.5925521850586,
      "loss": 0.6902,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": 0.0025604977272450924,
      "rewards/margins": 0.005932547152042389,
      "rewards/rejected": -0.003372048959136009,
      "step": 40
    },
    {
      "epoch": 0.23,
      "grad_norm": 8.535183437584605,
      "learning_rate": 4.747379352713488e-07,
      "logits/chosen": -0.685869574546814,
      "logits/rejected": -1.2057311534881592,
      "logps/chosen": -96.0764389038086,
      "logps/rejected": -127.81497955322266,
      "loss": 0.6884,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": 0.003552153008058667,
      "rewards/margins": 0.010397722944617271,
      "rewards/rejected": -0.006845570169389248,
      "step": 50
    },
    {
      "epoch": 0.28,
      "grad_norm": 8.190633609066436,
      "learning_rate": 4.541409157643027e-07,
      "logits/chosen": -0.6526767015457153,
      "logits/rejected": -1.1397744417190552,
      "logps/chosen": -89.37004089355469,
      "logps/rejected": -124.7223129272461,
      "loss": 0.6851,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": 0.0007335222908295691,
      "rewards/margins": 0.016615714877843857,
      "rewards/rejected": -0.01588219217956066,
      "step": 60
    },
    {
      "epoch": 0.32,
      "grad_norm": 8.847754167480884,
      "learning_rate": 4.282022261367073e-07,
      "logits/chosen": -0.4965807795524597,
      "logits/rejected": -1.2134766578674316,
      "logps/chosen": -86.97933959960938,
      "logps/rejected": -134.1288299560547,
      "loss": 0.6824,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": 0.0005543470033444464,
      "rewards/margins": 0.023061692714691162,
      "rewards/rejected": -0.02250734530389309,
      "step": 70
    },
    {
      "epoch": 0.37,
      "grad_norm": 8.841125645791609,
      "learning_rate": 3.9760059325148063e-07,
      "logits/chosen": -0.6324743032455444,
      "logits/rejected": -1.147215485572815,
      "logps/chosen": -91.2511978149414,
      "logps/rejected": -149.07229614257812,
      "loss": 0.6791,
      "rewards/accuracies": 0.8187500238418579,
      "rewards/chosen": 0.0010622118134051561,
      "rewards/margins": 0.032594919204711914,
      "rewards/rejected": -0.03153270110487938,
      "step": 80
    },
    {
      "epoch": 0.42,
      "grad_norm": 8.826466564074398,
      "learning_rate": 3.6313675726113475e-07,
      "logits/chosen": -0.6752112507820129,
      "logits/rejected": -1.1697431802749634,
      "logps/chosen": -83.96028900146484,
      "logps/rejected": -135.6808624267578,
      "loss": 0.6766,
      "rewards/accuracies": 0.793749988079071,
      "rewards/chosen": -0.007847856730222702,
      "rewards/margins": 0.033581167459487915,
      "rewards/rejected": -0.04142902046442032,
      "step": 90
    },
    {
      "epoch": 0.46,
      "grad_norm": 8.986918539516395,
      "learning_rate": 3.2571251897448763e-07,
      "logits/chosen": -0.7318952083587646,
      "logits/rejected": -1.1481754779815674,
      "logps/chosen": -104.59893798828125,
      "logps/rejected": -132.4669952392578,
      "loss": 0.6729,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -0.011359195224940777,
      "rewards/margins": 0.043643511831760406,
      "rewards/rejected": -0.055002711713314056,
      "step": 100
    },
    {
      "epoch": 0.51,
      "grad_norm": 8.709487324526142,
      "learning_rate": 2.863071428113726e-07,
      "logits/chosen": -0.6006079316139221,
      "logits/rejected": -1.0975641012191772,
      "logps/chosen": -84.71573638916016,
      "logps/rejected": -149.96249389648438,
      "loss": 0.6686,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.0164839755743742,
      "rewards/margins": 0.04694540053606033,
      "rewards/rejected": -0.06342937797307968,
      "step": 110
    },
    {
      "epoch": 0.56,
      "grad_norm": 9.237752042690602,
      "learning_rate": 2.459517327993746e-07,
      "logits/chosen": -0.6878473162651062,
      "logits/rejected": -1.3123815059661865,
      "logps/chosen": -90.21903991699219,
      "logps/rejected": -151.0259552001953,
      "loss": 0.6615,
      "rewards/accuracies": 0.8812500238418579,
      "rewards/chosen": -0.019475463777780533,
      "rewards/margins": 0.07015477865934372,
      "rewards/rejected": -0.08963023871183395,
      "step": 120
    },
    {
      "epoch": 0.6,
      "grad_norm": 9.286029411981152,
      "learning_rate": 2.0570225210519433e-07,
      "logits/chosen": -0.790011465549469,
      "logits/rejected": -1.2298551797866821,
      "logps/chosen": -93.1588363647461,
      "logps/rejected": -151.1782684326172,
      "loss": 0.661,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -0.030178239569067955,
      "rewards/margins": 0.07631012797355652,
      "rewards/rejected": -0.10648836940526962,
      "step": 130
    },
    {
      "epoch": 0.65,
      "grad_norm": 10.19246601057813,
      "learning_rate": 1.6661189208729489e-07,
      "logits/chosen": -0.6443529725074768,
      "logits/rejected": -1.133919596672058,
      "logps/chosen": -84.59071350097656,
      "logps/rejected": -138.85736083984375,
      "loss": 0.6579,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.04141147807240486,
      "rewards/margins": 0.07676441967487335,
      "rewards/rejected": -0.11817590147256851,
      "step": 140
    },
    {
      "epoch": 0.69,
      "grad_norm": 9.407025901804323,
      "learning_rate": 1.2970351387729872e-07,
      "logits/chosen": -0.73204106092453,
      "logits/rejected": -1.1557518243789673,
      "logps/chosen": -88.95694732666016,
      "logps/rejected": -131.26490783691406,
      "loss": 0.652,
      "rewards/accuracies": 0.831250011920929,
      "rewards/chosen": -0.03920191153883934,
      "rewards/margins": 0.08074145019054413,
      "rewards/rejected": -0.11994334310293198,
      "step": 150
    },
    {
      "epoch": 0.74,
      "grad_norm": 8.654405006026039,
      "learning_rate": 9.594288359976815e-08,
      "logits/chosen": -0.7579759359359741,
      "logits/rejected": -1.263896107673645,
      "logps/chosen": -86.85200500488281,
      "logps/rejected": -138.72409057617188,
      "loss": 0.6501,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -0.047876037657260895,
      "rewards/margins": 0.09631510823965073,
      "rewards/rejected": -0.14419114589691162,
      "step": 160
    },
    {
      "epoch": 0.79,
      "grad_norm": 8.716288122934651,
      "learning_rate": 6.621340157319996e-08,
      "logits/chosen": -0.736527681350708,
      "logits/rejected": -1.3233287334442139,
      "logps/chosen": -99.1356430053711,
      "logps/rejected": -159.6076202392578,
      "loss": 0.6487,
      "rewards/accuracies": 0.831250011920929,
      "rewards/chosen": -0.06683658063411713,
      "rewards/margins": 0.10212481021881104,
      "rewards/rejected": -0.16896137595176697,
      "step": 170
    },
    {
      "epoch": 0.83,
      "grad_norm": 9.622323797016332,
      "learning_rate": 4.1292986742682254e-08,
      "logits/chosen": -0.7446157932281494,
      "logits/rejected": -1.380061149597168,
      "logps/chosen": -96.81314849853516,
      "logps/rejected": -152.16970825195312,
      "loss": 0.6474,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -0.06702615320682526,
      "rewards/margins": 0.1122649684548378,
      "rewards/rejected": -0.17929109930992126,
      "step": 180
    },
    {
      "epoch": 0.88,
      "grad_norm": 10.2499777892521,
      "learning_rate": 2.183372119961499e-08,
      "logits/chosen": -0.695759117603302,
      "logits/rejected": -1.323012351989746,
      "logps/chosen": -100.9570541381836,
      "logps/rejected": -148.34474182128906,
      "loss": 0.6454,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -0.07095570862293243,
      "rewards/margins": 0.09609047323465347,
      "rewards/rejected": -0.1670461893081665,
      "step": 190
    },
    {
      "epoch": 0.93,
      "grad_norm": 9.240245078846256,
      "learning_rate": 8.344787421847216e-09,
      "logits/chosen": -0.7457176446914673,
      "logits/rejected": -1.3048522472381592,
      "logps/chosen": -97.51776885986328,
      "logps/rejected": -142.3980712890625,
      "loss": 0.6493,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -0.07124415785074234,
      "rewards/margins": 0.09479977190494537,
      "rewards/rejected": -0.16604390740394592,
      "step": 200
    },
    {
      "epoch": 0.97,
      "grad_norm": 9.838380839286996,
      "learning_rate": 1.1791447083465133e-09,
      "logits/chosen": -0.8287758827209473,
      "logits/rejected": -1.403287649154663,
      "logps/chosen": -104.15522766113281,
      "logps/rejected": -157.41732788085938,
      "loss": 0.6432,
      "rewards/accuracies": 0.84375,
      "rewards/chosen": -0.08230911195278168,
      "rewards/margins": 0.10910389572381973,
      "rewards/rejected": -0.191413015127182,
      "step": 210
    },
    {
      "epoch": 1.0,
      "step": 216,
      "total_flos": 0.0,
      "train_loss": 0.6676328546471066,
      "train_runtime": 2437.1919,
      "train_samples_per_second": 5.672,
      "train_steps_per_second": 0.089
    }
  ],
  "logging_steps": 10,
  "max_steps": 216,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}