{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 216,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "grad_norm": 8.33483565345429,
      "learning_rate": 2.2727272727272725e-08,
      "logits/chosen": -0.9294053316116333,
      "logits/rejected": -1.2306766510009766,
      "logps/chosen": -79.32324981689453,
      "logps/rejected": -124.87494659423828,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.05,
      "grad_norm": 8.957121309516765,
      "learning_rate": 2.2727272727272726e-07,
      "logits/chosen": -0.5779850482940674,
      "logits/rejected": -1.1789488792419434,
      "logps/chosen": -83.05654907226562,
      "logps/rejected": -135.26220703125,
      "loss": 0.6932,
      "rewards/accuracies": 0.4930555522441864,
      "rewards/chosen": 0.0003059397859033197,
      "rewards/margins": 0.00026924433768726885,
      "rewards/rejected": 3.6695546441478655e-05,
      "step": 10
    },
    {
      "epoch": 0.09,
      "grad_norm": 8.37389940695367,
      "learning_rate": 4.545454545454545e-07,
      "logits/chosen": -0.621688723564148,
      "logits/rejected": -1.1858972311019897,
      "logps/chosen": -95.2262954711914,
      "logps/rejected": -143.64205932617188,
      "loss": 0.6931,
      "rewards/accuracies": 0.48124998807907104,
      "rewards/chosen": -0.0002137714036507532,
      "rewards/margins": -0.0006433525122702122,
      "rewards/rejected": 0.00042958109406754375,
      "step": 20
    },
    {
      "epoch": 0.14,
      "grad_norm": 8.286301306160889,
      "learning_rate": 4.979050253066063e-07,
      "logits/chosen": -0.6890290975570679,
      "logits/rejected": -1.1448113918304443,
      "logps/chosen": -88.85906982421875,
      "logps/rejected": -132.80384826660156,
      "loss": 0.6917,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": 0.0019540651701390743,
      "rewards/margins": 0.00375129166059196,
      "rewards/rejected": -0.0017972266068682075,
      "step": 30
    },
    {
      "epoch": 0.19,
      "grad_norm": 8.434509757435025,
      "learning_rate": 4.894543310469967e-07,
      "logits/chosen": -0.5220860242843628,
      "logits/rejected": -1.1176507472991943,
      "logps/chosen": -91.24484252929688,
      "logps/rejected": -126.5925521850586,
      "loss": 0.6902,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": 0.0025604977272450924,
      "rewards/margins": 0.005932547152042389,
      "rewards/rejected": -0.003372048959136009,
      "step": 40
    },
    {
      "epoch": 0.23,
      "grad_norm": 8.535183437584605,
      "learning_rate": 4.747379352713488e-07,
      "logits/chosen": -0.685869574546814,
      "logits/rejected": -1.2057311534881592,
      "logps/chosen": -96.0764389038086,
      "logps/rejected": -127.81497955322266,
      "loss": 0.6884,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": 0.003552153008058667,
      "rewards/margins": 0.010397722944617271,
      "rewards/rejected": -0.006845570169389248,
      "step": 50
    },
    {
      "epoch": 0.28,
      "grad_norm": 8.190633609066436,
      "learning_rate": 4.541409157643027e-07,
      "logits/chosen": -0.6526767015457153,
      "logits/rejected": -1.1397744417190552,
      "logps/chosen": -89.37004089355469,
      "logps/rejected": -124.7223129272461,
      "loss": 0.6851,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": 0.0007335222908295691,
      "rewards/margins": 0.016615714877843857,
      "rewards/rejected": -0.01588219217956066,
      "step": 60
    },
    {
      "epoch": 0.32,
      "grad_norm": 8.847754167480884,
      "learning_rate": 4.282022261367073e-07,
      "logits/chosen": -0.4965807795524597,
      "logits/rejected": -1.2134766578674316,
      "logps/chosen": -86.97933959960938,
      "logps/rejected": -134.1288299560547,
      "loss": 0.6824,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": 0.0005543470033444464,
      "rewards/margins": 0.023061692714691162,
      "rewards/rejected": -0.02250734530389309,
      "step": 70
    },
    {
      "epoch": 0.37,
      "grad_norm": 8.841125645791609,
      "learning_rate": 3.9760059325148063e-07,
      "logits/chosen": -0.6324743032455444,
      "logits/rejected": -1.147215485572815,
      "logps/chosen": -91.2511978149414,
      "logps/rejected": -149.07229614257812,
      "loss": 0.6791,
      "rewards/accuracies": 0.8187500238418579,
      "rewards/chosen": 0.0010622118134051561,
      "rewards/margins": 0.032594919204711914,
      "rewards/rejected": -0.03153270110487938,
      "step": 80
    },
    {
      "epoch": 0.42,
      "grad_norm": 8.826466564074398,
      "learning_rate": 3.6313675726113475e-07,
      "logits/chosen": -0.6752112507820129,
      "logits/rejected": -1.1697431802749634,
      "logps/chosen": -83.96028900146484,
      "logps/rejected": -135.6808624267578,
      "loss": 0.6766,
      "rewards/accuracies": 0.793749988079071,
      "rewards/chosen": -0.007847856730222702,
      "rewards/margins": 0.033581167459487915,
      "rewards/rejected": -0.04142902046442032,
      "step": 90
    },
    {
      "epoch": 0.46,
      "grad_norm": 8.986918539516395,
      "learning_rate": 3.2571251897448763e-07,
      "logits/chosen": -0.7318952083587646,
      "logits/rejected": -1.1481754779815674,
      "logps/chosen": -104.59893798828125,
      "logps/rejected": -132.4669952392578,
      "loss": 0.6729,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -0.011359195224940777,
      "rewards/margins": 0.043643511831760406,
      "rewards/rejected": -0.055002711713314056,
      "step": 100
    },
    {
      "epoch": 0.51,
      "grad_norm": 8.709487324526142,
      "learning_rate": 2.863071428113726e-07,
      "logits/chosen": -0.6006079316139221,
      "logits/rejected": -1.0975641012191772,
      "logps/chosen": -84.71573638916016,
      "logps/rejected": -149.96249389648438,
      "loss": 0.6686,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.0164839755743742,
      "rewards/margins": 0.04694540053606033,
      "rewards/rejected": -0.06342937797307968,
      "step": 110
    },
    {
      "epoch": 0.56,
      "grad_norm": 9.237752042690602,
      "learning_rate": 2.459517327993746e-07,
      "logits/chosen": -0.6878473162651062,
      "logits/rejected": -1.3123815059661865,
      "logps/chosen": -90.21903991699219,
      "logps/rejected": -151.0259552001953,
      "loss": 0.6615,
      "rewards/accuracies": 0.8812500238418579,
      "rewards/chosen": -0.019475463777780533,
      "rewards/margins": 0.07015477865934372,
      "rewards/rejected": -0.08963023871183395,
      "step": 120
    },
    {
      "epoch": 0.6,
      "grad_norm": 9.286029411981152,
      "learning_rate": 2.0570225210519433e-07,
      "logits/chosen": -0.790011465549469,
      "logits/rejected": -1.2298551797866821,
      "logps/chosen": -93.1588363647461,
      "logps/rejected": -151.1782684326172,
      "loss": 0.661,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -0.030178239569067955,
      "rewards/margins": 0.07631012797355652,
      "rewards/rejected": -0.10648836940526962,
      "step": 130
    },
    {
      "epoch": 0.65,
      "grad_norm": 10.19246601057813,
      "learning_rate": 1.6661189208729489e-07,
      "logits/chosen": -0.6443529725074768,
      "logits/rejected": -1.133919596672058,
      "logps/chosen": -84.59071350097656,
      "logps/rejected": -138.85736083984375,
      "loss": 0.6579,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.04141147807240486,
      "rewards/margins": 0.07676441967487335,
      "rewards/rejected": -0.11817590147256851,
      "step": 140
    },
    {
      "epoch": 0.69,
      "grad_norm": 9.407025901804323,
      "learning_rate": 1.2970351387729872e-07,
      "logits/chosen": -0.73204106092453,
      "logits/rejected": -1.1557518243789673,
      "logps/chosen": -88.95694732666016,
      "logps/rejected": -131.26490783691406,
      "loss": 0.652,
      "rewards/accuracies": 0.831250011920929,
      "rewards/chosen": -0.03920191153883934,
      "rewards/margins": 0.08074145019054413,
      "rewards/rejected": -0.11994334310293198,
      "step": 150
    },
    {
      "epoch": 0.74,
      "grad_norm": 8.654405006026039,
      "learning_rate": 9.594288359976815e-08,
      "logits/chosen": -0.7579759359359741,
      "logits/rejected": -1.263896107673645,
      "logps/chosen": -86.85200500488281,
      "logps/rejected": -138.72409057617188,
      "loss": 0.6501,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -0.047876037657260895,
      "rewards/margins": 0.09631510823965073,
      "rewards/rejected": -0.14419114589691162,
      "step": 160
    },
    {
      "epoch": 0.79,
      "grad_norm": 8.716288122934651,
      "learning_rate": 6.621340157319996e-08,
      "logits/chosen": -0.736527681350708,
      "logits/rejected": -1.3233287334442139,
      "logps/chosen": -99.1356430053711,
      "logps/rejected": -159.6076202392578,
      "loss": 0.6487,
      "rewards/accuracies": 0.831250011920929,
      "rewards/chosen": -0.06683658063411713,
      "rewards/margins": 0.10212481021881104,
      "rewards/rejected": -0.16896137595176697,
      "step": 170
    },
    {
      "epoch": 0.83,
      "grad_norm": 9.622323797016332,
      "learning_rate": 4.1292986742682254e-08,
      "logits/chosen": -0.7446157932281494,
      "logits/rejected": -1.380061149597168,
      "logps/chosen": -96.81314849853516,
      "logps/rejected": -152.16970825195312,
      "loss": 0.6474,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -0.06702615320682526,
      "rewards/margins": 0.1122649684548378,
      "rewards/rejected": -0.17929109930992126,
      "step": 180
    },
    {
      "epoch": 0.88,
      "grad_norm": 10.2499777892521,
      "learning_rate": 2.183372119961499e-08,
      "logits/chosen": -0.695759117603302,
      "logits/rejected": -1.323012351989746,
      "logps/chosen": -100.9570541381836,
      "logps/rejected": -148.34474182128906,
      "loss": 0.6454,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -0.07095570862293243,
      "rewards/margins": 0.09609047323465347,
      "rewards/rejected": -0.1670461893081665,
      "step": 190
    },
    {
      "epoch": 0.93,
      "grad_norm": 9.240245078846256,
      "learning_rate": 8.344787421847216e-09,
      "logits/chosen": -0.7457176446914673,
      "logits/rejected": -1.3048522472381592,
      "logps/chosen": -97.51776885986328,
      "logps/rejected": -142.3980712890625,
      "loss": 0.6493,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -0.07124415785074234,
      "rewards/margins": 0.09479977190494537,
      "rewards/rejected": -0.16604390740394592,
      "step": 200
    },
    {
      "epoch": 0.97,
      "grad_norm": 9.838380839286996,
      "learning_rate": 1.1791447083465133e-09,
      "logits/chosen": -0.8287758827209473,
      "logits/rejected": -1.403287649154663,
      "logps/chosen": -104.15522766113281,
      "logps/rejected": -157.41732788085938,
      "loss": 0.6432,
      "rewards/accuracies": 0.84375,
      "rewards/chosen": -0.08230911195278168,
      "rewards/margins": 0.10910389572381973,
      "rewards/rejected": -0.191413015127182,
      "step": 210
    },
    {
      "epoch": 1.0,
      "step": 216,
      "total_flos": 0.0,
      "train_loss": 0.6676328546471066,
      "train_runtime": 2437.1919,
      "train_samples_per_second": 5.672,
      "train_steps_per_second": 0.089
    }
  ],
  "logging_steps": 10,
  "max_steps": 216,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}