{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 1.0, |
|
"eval_steps": 500, |
|
"global_step": 501, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.001996007984031936, |
|
"grad_norm": 16.492788975984936, |
|
"learning_rate": 1.9607843137254902e-08, |
|
"logits/chosen": 1.3011724948883057, |
|
"logits/rejected": 1.3879833221435547, |
|
"logps/chosen": -0.9583934545516968, |
|
"logps/rejected": -0.908146858215332, |
|
"loss": 0.6931, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.00998003992015968, |
|
"grad_norm": 18.616408754077472, |
|
"learning_rate": 9.80392156862745e-08, |
|
"logits/chosen": 1.230096697807312, |
|
"logits/rejected": 1.2526068687438965, |
|
"logps/chosen": -0.8566818833351135, |
|
"logps/rejected": -0.9451415538787842, |
|
"loss": 0.6931, |
|
"rewards/accuracies": 0.34375, |
|
"rewards/chosen": -0.00325668603181839, |
|
"rewards/margins": -0.0027664820663630962, |
|
"rewards/rejected": -0.0004902039654552937, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.01996007984031936, |
|
"grad_norm": 19.72943287901331, |
|
"learning_rate": 1.96078431372549e-07, |
|
"logits/chosen": 1.1732714176177979, |
|
"logits/rejected": 1.2325665950775146, |
|
"logps/chosen": -0.8571804165840149, |
|
"logps/rejected": -0.9293950200080872, |
|
"loss": 0.6935, |
|
"rewards/accuracies": 0.5249999761581421, |
|
"rewards/chosen": -0.0004458148032426834, |
|
"rewards/margins": 0.00015245657414197922, |
|
"rewards/rejected": -0.0005982713773846626, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.029940119760479042, |
|
"grad_norm": 15.989882260008397, |
|
"learning_rate": 2.941176470588235e-07, |
|
"logits/chosen": 1.1804090738296509, |
|
"logits/rejected": 1.227670669555664, |
|
"logps/chosen": -0.8887054324150085, |
|
"logps/rejected": -0.9415630102157593, |
|
"loss": 0.6942, |
|
"rewards/accuracies": 0.4124999940395355, |
|
"rewards/chosen": -0.005856434814631939, |
|
"rewards/margins": -0.004528886638581753, |
|
"rewards/rejected": -0.0013275481760501862, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 0.03992015968063872, |
|
"grad_norm": 21.617808491664096, |
|
"learning_rate": 3.92156862745098e-07, |
|
"logits/chosen": 1.2326724529266357, |
|
"logits/rejected": 1.239334225654602, |
|
"logps/chosen": -1.025779366493225, |
|
"logps/rejected": -0.9815893173217773, |
|
"loss": 0.6924, |
|
"rewards/accuracies": 0.5249999761581421, |
|
"rewards/chosen": -0.011453075334429741, |
|
"rewards/margins": -0.006528028752654791, |
|
"rewards/rejected": -0.004925046116113663, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.0499001996007984, |
|
"grad_norm": 14.85293949325317, |
|
"learning_rate": 4.901960784313725e-07, |
|
"logits/chosen": 1.1621646881103516, |
|
"logits/rejected": 1.2202112674713135, |
|
"logps/chosen": -0.9435383081436157, |
|
"logps/rejected": -1.0095245838165283, |
|
"loss": 0.6926, |
|
"rewards/accuracies": 0.4749999940395355, |
|
"rewards/chosen": -0.008930020034313202, |
|
"rewards/margins": 0.011791445314884186, |
|
"rewards/rejected": -0.020721465349197388, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.059880239520958084, |
|
"grad_norm": 18.28049213488445, |
|
"learning_rate": 5.88235294117647e-07, |
|
"logits/chosen": 1.2528660297393799, |
|
"logits/rejected": 1.3036034107208252, |
|
"logps/chosen": -0.9394888877868652, |
|
"logps/rejected": -0.9887625575065613, |
|
"loss": 0.6928, |
|
"rewards/accuracies": 0.5, |
|
"rewards/chosen": -0.020373398438096046, |
|
"rewards/margins": -0.0037106983363628387, |
|
"rewards/rejected": -0.016662701964378357, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.06986027944111776, |
|
"grad_norm": 15.999817261895291, |
|
"learning_rate": 6.862745098039216e-07, |
|
"logits/chosen": 1.1911170482635498, |
|
"logits/rejected": 1.2343870401382446, |
|
"logps/chosen": -0.9691197276115417, |
|
"logps/rejected": -0.9998400807380676, |
|
"loss": 0.6912, |
|
"rewards/accuracies": 0.48750001192092896, |
|
"rewards/chosen": -0.01592147909104824, |
|
"rewards/margins": 0.0014309640973806381, |
|
"rewards/rejected": -0.01735244318842888, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 0.07984031936127745, |
|
"grad_norm": 14.867532956796651, |
|
"learning_rate": 7.84313725490196e-07, |
|
"logits/chosen": 1.2250399589538574, |
|
"logits/rejected": 1.2877371311187744, |
|
"logps/chosen": -0.9882305860519409, |
|
"logps/rejected": -1.013457179069519, |
|
"loss": 0.6866, |
|
"rewards/accuracies": 0.625, |
|
"rewards/chosen": -0.014133378863334656, |
|
"rewards/margins": 0.023214522749185562, |
|
"rewards/rejected": -0.03734790161252022, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.08982035928143713, |
|
"grad_norm": 19.324457876267314, |
|
"learning_rate": 8.823529411764705e-07, |
|
"logits/chosen": 1.2738926410675049, |
|
"logits/rejected": 1.2850474119186401, |
|
"logps/chosen": -0.9675604701042175, |
|
"logps/rejected": -1.0223758220672607, |
|
"loss": 0.6804, |
|
"rewards/accuracies": 0.6499999761581421, |
|
"rewards/chosen": -0.010748568922281265, |
|
"rewards/margins": 0.0401780791580677, |
|
"rewards/rejected": -0.05092664435505867, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 0.0998003992015968, |
|
"grad_norm": 18.41965619240879, |
|
"learning_rate": 9.80392156862745e-07, |
|
"logits/chosen": 1.2459691762924194, |
|
"logits/rejected": 1.3021132946014404, |
|
"logps/chosen": -0.9164802432060242, |
|
"logps/rejected": -1.013667345046997, |
|
"loss": 0.6739, |
|
"rewards/accuracies": 0.637499988079071, |
|
"rewards/chosen": -0.026862669736146927, |
|
"rewards/margins": 0.07785479724407196, |
|
"rewards/rejected": -0.10471747070550919, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.10978043912175649, |
|
"grad_norm": 16.897558680765105, |
|
"learning_rate": 9.99805057520177e-07, |
|
"logits/chosen": 1.1725871562957764, |
|
"logits/rejected": 1.173532247543335, |
|
"logps/chosen": -0.9711742401123047, |
|
"logps/rejected": -0.9595904350280762, |
|
"loss": 0.6738, |
|
"rewards/accuracies": 0.737500011920929, |
|
"rewards/chosen": -0.03100317157804966, |
|
"rewards/margins": 0.11643954366445541, |
|
"rewards/rejected": -0.14744271337985992, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 0.11976047904191617, |
|
"grad_norm": 14.265898399249132, |
|
"learning_rate": 9.990133642141357e-07, |
|
"logits/chosen": 1.1897612810134888, |
|
"logits/rejected": 1.2000809907913208, |
|
"logps/chosen": -0.9207404255867004, |
|
"logps/rejected": -0.9565631747245789, |
|
"loss": 0.6693, |
|
"rewards/accuracies": 0.6000000238418579, |
|
"rewards/chosen": -0.06663166731595993, |
|
"rewards/margins": 0.18790733814239502, |
|
"rewards/rejected": -0.25453901290893555, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.12974051896207583, |
|
"grad_norm": 19.11690615448299, |
|
"learning_rate": 9.976136999909155e-07, |
|
"logits/chosen": 1.261150598526001, |
|
"logits/rejected": 1.2706950902938843, |
|
"logps/chosen": -0.9832024574279785, |
|
"logps/rejected": -0.9762998819351196, |
|
"loss": 0.6736, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": -0.08731752634048462, |
|
"rewards/margins": 0.037145014852285385, |
|
"rewards/rejected": -0.1244625449180603, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 0.13972055888223553, |
|
"grad_norm": 16.308947856088572, |
|
"learning_rate": 9.956077701257707e-07, |
|
"logits/chosen": 1.1949059963226318, |
|
"logits/rejected": 1.2373930215835571, |
|
"logps/chosen": -0.923254132270813, |
|
"logps/rejected": -0.9537776708602905, |
|
"loss": 0.6588, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": -0.0689881294965744, |
|
"rewards/margins": 0.0771615132689476, |
|
"rewards/rejected": -0.1461496502161026, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.1497005988023952, |
|
"grad_norm": 18.31597399561215, |
|
"learning_rate": 9.929980185352525e-07, |
|
"logits/chosen": 1.1303855180740356, |
|
"logits/rejected": 1.166472315788269, |
|
"logps/chosen": -1.0079195499420166, |
|
"logps/rejected": -1.0075769424438477, |
|
"loss": 0.6486, |
|
"rewards/accuracies": 0.7124999761581421, |
|
"rewards/chosen": -0.026683980599045753, |
|
"rewards/margins": 0.0837903693318367, |
|
"rewards/rejected": -0.1104743480682373, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 0.1596806387225549, |
|
"grad_norm": 16.142069213484454, |
|
"learning_rate": 9.89787624799672e-07, |
|
"logits/chosen": 1.1199300289154053, |
|
"logits/rejected": 1.1778942346572876, |
|
"logps/chosen": -0.9652664065361023, |
|
"logps/rejected": -1.0307899713516235, |
|
"loss": 0.6496, |
|
"rewards/accuracies": 0.737500011920929, |
|
"rewards/chosen": -0.0005544237792491913, |
|
"rewards/margins": 0.10419625043869019, |
|
"rewards/rejected": -0.10475067049264908, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.16966067864271456, |
|
"grad_norm": 15.306215682029483, |
|
"learning_rate": 9.859805002892731e-07, |
|
"logits/chosen": 1.1645634174346924, |
|
"logits/rejected": 1.1519969701766968, |
|
"logps/chosen": -0.9917243719100952, |
|
"logps/rejected": -1.060416340827942, |
|
"loss": 0.661, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": -0.09171438962221146, |
|
"rewards/margins": 0.20642364025115967, |
|
"rewards/rejected": -0.2981380820274353, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 0.17964071856287425, |
|
"grad_norm": 15.254186655042064, |
|
"learning_rate": 9.81581283398829e-07, |
|
"logits/chosen": 1.103563904762268, |
|
"logits/rejected": 1.1300289630889893, |
|
"logps/chosen": -0.969647228717804, |
|
"logps/rejected": -0.9916777610778809, |
|
"loss": 0.6337, |
|
"rewards/accuracies": 0.7250000238418579, |
|
"rewards/chosen": 0.013281228020787239, |
|
"rewards/margins": 0.1793346405029297, |
|
"rewards/rejected": -0.1660534292459488, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.18962075848303392, |
|
"grad_norm": 15.282160453652757, |
|
"learning_rate": 9.765953338964734e-07, |
|
"logits/chosen": 1.0921884775161743, |
|
"logits/rejected": 1.1226933002471924, |
|
"logps/chosen": -1.0730102062225342, |
|
"logps/rejected": -1.1024601459503174, |
|
"loss": 0.6385, |
|
"rewards/accuracies": 0.7749999761581421, |
|
"rewards/chosen": 0.0058188484981656075, |
|
"rewards/margins": 0.21344614028930664, |
|
"rewards/rejected": -0.2076272964477539, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 0.1996007984031936, |
|
"grad_norm": 21.61748232589367, |
|
"learning_rate": 9.710287263936483e-07, |
|
"logits/chosen": 1.0795061588287354, |
|
"logits/rejected": 1.0822367668151855, |
|
"logps/chosen": -1.0821864604949951, |
|
"logps/rejected": -1.1186153888702393, |
|
"loss": 0.621, |
|
"rewards/accuracies": 0.6875, |
|
"rewards/chosen": 0.07217474281787872, |
|
"rewards/margins": 0.3310089707374573, |
|
"rewards/rejected": -0.25883427262306213, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.20958083832335328, |
|
"grad_norm": 23.129340339628143, |
|
"learning_rate": 9.648882429441256e-07, |
|
"logits/chosen": 1.0758105516433716, |
|
"logits/rejected": 1.0959206819534302, |
|
"logps/chosen": -0.9559111595153809, |
|
"logps/rejected": -0.9702944755554199, |
|
"loss": 0.6278, |
|
"rewards/accuracies": 0.7124999761581421, |
|
"rewards/chosen": -0.027358528226614, |
|
"rewards/margins": 0.17237558960914612, |
|
"rewards/rejected": -0.1997341364622116, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 0.21956087824351297, |
|
"grad_norm": 15.179993551058994, |
|
"learning_rate": 9.581813647811197e-07, |
|
"logits/chosen": 1.094158411026001, |
|
"logits/rejected": 1.114650011062622, |
|
"logps/chosen": -0.9534612894058228, |
|
"logps/rejected": -0.9674752354621887, |
|
"loss": 0.6124, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": 0.058080364018678665, |
|
"rewards/margins": 0.30136996507644653, |
|
"rewards/rejected": -0.24328958988189697, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.22954091816367264, |
|
"grad_norm": 13.705886523650493, |
|
"learning_rate": 9.509162632025569e-07, |
|
"logits/chosen": 1.0607202053070068, |
|
"logits/rejected": 1.045424461364746, |
|
"logps/chosen": -1.1245949268341064, |
|
"logps/rejected": -1.1509120464324951, |
|
"loss": 0.6167, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": -0.07293939590454102, |
|
"rewards/margins": 0.17659255862236023, |
|
"rewards/rejected": -0.24953195452690125, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 0.23952095808383234, |
|
"grad_norm": 27.63745397695, |
|
"learning_rate": 9.431017896156073e-07, |
|
"logits/chosen": 1.0884307622909546, |
|
"logits/rejected": 1.101423978805542, |
|
"logps/chosen": -0.9731513857841492, |
|
"logps/rejected": -1.023944616317749, |
|
"loss": 0.6061, |
|
"rewards/accuracies": 0.6625000238418579, |
|
"rewards/chosen": 0.055571239441633224, |
|
"rewards/margins": 0.36377543210983276, |
|
"rewards/rejected": -0.3082042336463928, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.249500998003992, |
|
"grad_norm": 14.89222449165367, |
|
"learning_rate": 9.347474647526095e-07, |
|
"logits/chosen": 1.0270758867263794, |
|
"logits/rejected": 1.061629056930542, |
|
"logps/chosen": -0.9520384073257446, |
|
"logps/rejected": -0.9609392285346985, |
|
"loss": 0.5982, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -0.03607760742306709, |
|
"rewards/margins": 0.205668643116951, |
|
"rewards/rejected": -0.24174626171588898, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 0.25948103792415167, |
|
"grad_norm": 16.598481366896863, |
|
"learning_rate": 9.258634670715237e-07, |
|
"logits/chosen": 0.9625661969184875, |
|
"logits/rejected": 0.9923114776611328, |
|
"logps/chosen": -0.9781230092048645, |
|
"logps/rejected": -1.0380465984344482, |
|
"loss": 0.6033, |
|
"rewards/accuracies": 0.8374999761581421, |
|
"rewards/chosen": -0.08731977641582489, |
|
"rewards/margins": 0.32325008511543274, |
|
"rewards/rejected": -0.41056984663009644, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 0.2694610778443114, |
|
"grad_norm": 17.332548482467526, |
|
"learning_rate": 9.164606203550497e-07, |
|
"logits/chosen": 1.0289146900177002, |
|
"logits/rejected": 1.0656378269195557, |
|
"logps/chosen": -0.898147463798523, |
|
"logps/rejected": -0.949881911277771, |
|
"loss": 0.602, |
|
"rewards/accuracies": 0.7749999761581421, |
|
"rewards/chosen": -0.01896672695875168, |
|
"rewards/margins": 0.33230799436569214, |
|
"rewards/rejected": -0.351274698972702, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 0.27944111776447106, |
|
"grad_norm": 16.198930205544908, |
|
"learning_rate": 9.065503805235137e-07, |
|
"logits/chosen": 1.0169068574905396, |
|
"logits/rejected": 1.0443648099899292, |
|
"logps/chosen": -0.9837234616279602, |
|
"logps/rejected": -1.015891432762146, |
|
"loss": 0.5962, |
|
"rewards/accuracies": 0.7749999761581421, |
|
"rewards/chosen": -0.11362324655056, |
|
"rewards/margins": 0.29118040204048157, |
|
"rewards/rejected": -0.40480366349220276, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 0.2894211576846307, |
|
"grad_norm": 14.26896893631501, |
|
"learning_rate": 8.961448216775953e-07, |
|
"logits/chosen": 0.9983326196670532, |
|
"logits/rejected": 0.984410285949707, |
|
"logps/chosen": -1.0478496551513672, |
|
"logps/rejected": -1.0458667278289795, |
|
"loss": 0.6106, |
|
"rewards/accuracies": 0.6625000238418579, |
|
"rewards/chosen": -0.384859174489975, |
|
"rewards/margins": 0.23550406098365784, |
|
"rewards/rejected": -0.6203632354736328, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 0.2994011976047904, |
|
"grad_norm": 17.114721290662626, |
|
"learning_rate": 8.852566213878946e-07, |
|
"logits/chosen": 0.9434808492660522, |
|
"logits/rejected": 0.9709761738777161, |
|
"logps/chosen": -0.946380615234375, |
|
"logps/rejected": -1.011891484260559, |
|
"loss": 0.6051, |
|
"rewards/accuracies": 0.8125, |
|
"rewards/chosen": -0.10636933147907257, |
|
"rewards/margins": 0.38454684615135193, |
|
"rewards/rejected": -0.4909161627292633, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.3093812375249501, |
|
"grad_norm": 12.532368784046184, |
|
"learning_rate": 8.73899045249266e-07, |
|
"logits/chosen": 0.9879257082939148, |
|
"logits/rejected": 1.0284427404403687, |
|
"logps/chosen": -0.9887377619743347, |
|
"logps/rejected": -1.0425301790237427, |
|
"loss": 0.5785, |
|
"rewards/accuracies": 0.7124999761581421, |
|
"rewards/chosen": -0.18314163386821747, |
|
"rewards/margins": 0.3873257339000702, |
|
"rewards/rejected": -0.5704673528671265, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 0.3193612774451098, |
|
"grad_norm": 20.653306879612042, |
|
"learning_rate": 8.620859307187338e-07, |
|
"logits/chosen": 0.9365812540054321, |
|
"logits/rejected": 0.9702633619308472, |
|
"logps/chosen": -0.9792872667312622, |
|
"logps/rejected": -1.0119134187698364, |
|
"loss": 0.5915, |
|
"rewards/accuracies": 0.7124999761581421, |
|
"rewards/chosen": -0.28814950585365295, |
|
"rewards/margins": 0.3159632086753845, |
|
"rewards/rejected": -0.6041126847267151, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 0.32934131736526945, |
|
"grad_norm": 46.72961396087624, |
|
"learning_rate": 8.498316702566826e-07, |
|
"logits/chosen": 1.0088741779327393, |
|
"logits/rejected": 1.0628801584243774, |
|
"logps/chosen": -1.0191477537155151, |
|
"logps/rejected": -1.0589340925216675, |
|
"loss": 0.5784, |
|
"rewards/accuracies": 0.7749999761581421, |
|
"rewards/chosen": -0.43470463156700134, |
|
"rewards/margins": 0.3699150085449219, |
|
"rewards/rejected": -0.8046196103096008, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 0.3393213572854291, |
|
"grad_norm": 17.875828780004216, |
|
"learning_rate": 8.371511937918617e-07, |
|
"logits/chosen": 0.8878538012504578, |
|
"logits/rejected": 0.9223688840866089, |
|
"logps/chosen": -0.9879400134086609, |
|
"logps/rejected": -1.029184103012085, |
|
"loss": 0.6033, |
|
"rewards/accuracies": 0.7749999761581421, |
|
"rewards/chosen": -0.3266729712486267, |
|
"rewards/margins": 0.5894866585731506, |
|
"rewards/rejected": -0.9161597490310669, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 0.34930139720558884, |
|
"grad_norm": 16.753564996079323, |
|
"learning_rate": 8.240599505315654e-07, |
|
"logits/chosen": 0.8993169665336609, |
|
"logits/rejected": 0.931827187538147, |
|
"logps/chosen": -1.0300993919372559, |
|
"logps/rejected": -1.104100227355957, |
|
"loss": 0.5719, |
|
"rewards/accuracies": 0.762499988079071, |
|
"rewards/chosen": -0.26795631647109985, |
|
"rewards/margins": 0.4897838234901428, |
|
"rewards/rejected": -0.7577401399612427, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 0.3592814371257485, |
|
"grad_norm": 37.583807172799176, |
|
"learning_rate": 8.105738901391551e-07, |
|
"logits/chosen": 0.9503185153007507, |
|
"logits/rejected": 0.9612258076667786, |
|
"logps/chosen": -0.9984599947929382, |
|
"logps/rejected": -1.0410314798355103, |
|
"loss": 0.5749, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": -0.3058738708496094, |
|
"rewards/margins": 0.4171958565711975, |
|
"rewards/rejected": -0.7230697274208069, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 0.36926147704590817, |
|
"grad_norm": 31.25023262450056, |
|
"learning_rate": 7.967094433018508e-07, |
|
"logits/chosen": 0.8538301587104797, |
|
"logits/rejected": 0.8765438199043274, |
|
"logps/chosen": -1.0639922618865967, |
|
"logps/rejected": -1.1690170764923096, |
|
"loss": 0.5628, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -0.31266123056411743, |
|
"rewards/margins": 0.6013716459274292, |
|
"rewards/rejected": -0.9140329360961914, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 0.37924151696606784, |
|
"grad_norm": 21.697159728289254, |
|
"learning_rate": 7.82483501712469e-07, |
|
"logits/chosen": 0.8337196111679077, |
|
"logits/rejected": 0.8647937774658203, |
|
"logps/chosen": -1.0461093187332153, |
|
"logps/rejected": -1.0427898168563843, |
|
"loss": 0.5701, |
|
"rewards/accuracies": 0.737500011920929, |
|
"rewards/chosen": -0.48062148690223694, |
|
"rewards/margins": 0.4651462435722351, |
|
"rewards/rejected": -0.9457677602767944, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 0.38922155688622756, |
|
"grad_norm": 13.452601846054984, |
|
"learning_rate": 7.679133974894982e-07, |
|
"logits/chosen": 0.8574188351631165, |
|
"logits/rejected": 0.8771845102310181, |
|
"logps/chosen": -1.0106494426727295, |
|
"logps/rejected": -1.1300721168518066, |
|
"loss": 0.5649, |
|
"rewards/accuracies": 0.7875000238418579, |
|
"rewards/chosen": -0.5489331483840942, |
|
"rewards/margins": 0.6363680958747864, |
|
"rewards/rejected": -1.1853011846542358, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 0.3992015968063872, |
|
"grad_norm": 14.045999687736995, |
|
"learning_rate": 7.530168820605818e-07, |
|
"logits/chosen": 0.8429155349731445, |
|
"logits/rejected": 0.8586641550064087, |
|
"logps/chosen": -1.1243480443954468, |
|
"logps/rejected": -1.126988172531128, |
|
"loss": 0.5556, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": -0.611405074596405, |
|
"rewards/margins": 0.5212133526802063, |
|
"rewards/rejected": -1.1326184272766113, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.4091816367265469, |
|
"grad_norm": 11.17840118621038, |
|
"learning_rate": 7.378121045351377e-07, |
|
"logits/chosen": 0.8805230855941772, |
|
"logits/rejected": 0.8616092801094055, |
|
"logps/chosen": -1.025155782699585, |
|
"logps/rejected": -1.1043505668640137, |
|
"loss": 0.5274, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -0.5864971280097961, |
|
"rewards/margins": 0.6259642839431763, |
|
"rewards/rejected": -1.2124613523483276, |
|
"step": 205 |
|
}, |
|
{ |
|
"epoch": 0.41916167664670656, |
|
"grad_norm": 17.02714464481709, |
|
"learning_rate": 7.223175895924637e-07, |
|
"logits/chosen": 0.8006958961486816, |
|
"logits/rejected": 0.8346986770629883, |
|
"logps/chosen": -1.0029652118682861, |
|
"logps/rejected": -1.0653495788574219, |
|
"loss": 0.5437, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": -0.5938572287559509, |
|
"rewards/margins": 0.5961542725563049, |
|
"rewards/rejected": -1.1900113821029663, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 0.4291417165668663, |
|
"grad_norm": 12.708513730083673, |
|
"learning_rate": 7.065522149122709e-07, |
|
"logits/chosen": 0.7250524759292603, |
|
"logits/rejected": 0.7448651790618896, |
|
"logps/chosen": -1.0977203845977783, |
|
"logps/rejected": -1.1671373844146729, |
|
"loss": 0.5296, |
|
"rewards/accuracies": 0.8374999761581421, |
|
"rewards/chosen": -0.8822553753852844, |
|
"rewards/margins": 0.5623547434806824, |
|
"rewards/rejected": -1.4446099996566772, |
|
"step": 215 |
|
}, |
|
{ |
|
"epoch": 0.43912175648702595, |
|
"grad_norm": 23.02316300872982, |
|
"learning_rate": 6.905351881751371e-07, |
|
"logits/chosen": 0.7448990345001221, |
|
"logits/rejected": 0.759792685508728, |
|
"logps/chosen": -1.0590717792510986, |
|
"logps/rejected": -1.1264311075210571, |
|
"loss": 0.5411, |
|
"rewards/accuracies": 0.7250000238418579, |
|
"rewards/chosen": -0.7196047306060791, |
|
"rewards/margins": 0.6396269798278809, |
|
"rewards/rejected": -1.35923171043396, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 0.4491017964071856, |
|
"grad_norm": 15.317013674505024, |
|
"learning_rate": 6.742860236609076e-07, |
|
"logits/chosen": 0.7387903928756714, |
|
"logits/rejected": 0.7482835054397583, |
|
"logps/chosen": -1.0746328830718994, |
|
"logps/rejected": -1.0935697555541992, |
|
"loss": 0.5362, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -0.91168612241745, |
|
"rewards/margins": 0.43347805738449097, |
|
"rewards/rejected": -1.3451642990112305, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 0.4590818363273453, |
|
"grad_norm": 20.979897267224555, |
|
"learning_rate": 6.578245184735512e-07, |
|
"logits/chosen": 0.7530170679092407, |
|
"logits/rejected": 0.7971817851066589, |
|
"logps/chosen": -1.0877180099487305, |
|
"logps/rejected": -1.1133688688278198, |
|
"loss": 0.5297, |
|
"rewards/accuracies": 0.762499988079071, |
|
"rewards/chosen": -0.7731893062591553, |
|
"rewards/margins": 0.6359795331954956, |
|
"rewards/rejected": -1.4091689586639404, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 0.469061876247505, |
|
"grad_norm": 23.654443057551042, |
|
"learning_rate": 6.411707284214383e-07, |
|
"logits/chosen": 0.7936985492706299, |
|
"logits/rejected": 0.8071283102035522, |
|
"logps/chosen": -1.1007978916168213, |
|
"logps/rejected": -1.175083041191101, |
|
"loss": 0.5189, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -0.8833421468734741, |
|
"rewards/margins": 0.6522087454795837, |
|
"rewards/rejected": -1.535550832748413, |
|
"step": 235 |
|
}, |
|
{ |
|
"epoch": 0.47904191616766467, |
|
"grad_norm": 17.617163550183303, |
|
"learning_rate": 6.243449435824276e-07, |
|
"logits/chosen": 0.7208539247512817, |
|
"logits/rejected": 0.7419721484184265, |
|
"logps/chosen": -0.9637038111686707, |
|
"logps/rejected": -1.0924652814865112, |
|
"loss": 0.5239, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -0.823961079120636, |
|
"rewards/margins": 0.9264790415763855, |
|
"rewards/rejected": -1.750440239906311, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 0.48902195608782434, |
|
"grad_norm": 23.429183199427385, |
|
"learning_rate": 6.073676635835316e-07, |
|
"logits/chosen": 0.7787541747093201, |
|
"logits/rejected": 0.8052628636360168, |
|
"logps/chosen": -1.0102448463439941, |
|
"logps/rejected": -1.2205474376678467, |
|
"loss": 0.5087, |
|
"rewards/accuracies": 0.8125, |
|
"rewards/chosen": -0.7600202560424805, |
|
"rewards/margins": 1.2399606704711914, |
|
"rewards/rejected": -1.9999806880950928, |
|
"step": 245 |
|
}, |
|
{ |
|
"epoch": 0.499001996007984, |
|
"grad_norm": 15.897882119964894, |
|
"learning_rate": 5.9025957262528e-07, |
|
"logits/chosen": 0.674079179763794, |
|
"logits/rejected": 0.6560205221176147, |
|
"logps/chosen": -1.0422561168670654, |
|
"logps/rejected": -1.15647292137146, |
|
"loss": 0.5129, |
|
"rewards/accuracies": 0.8374999761581421, |
|
"rewards/chosen": -0.9762313961982727, |
|
"rewards/margins": 0.9589719772338867, |
|
"rewards/rejected": -1.9352035522460938, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 0.5089820359281437, |
|
"grad_norm": 23.926407573258526, |
|
"learning_rate": 5.730415142812058e-07, |
|
"logits/chosen": 0.6435045003890991, |
|
"logits/rejected": 0.6540420055389404, |
|
"logps/chosen": -1.1076724529266357, |
|
"logps/rejected": -1.2272051572799683, |
|
"loss": 0.5207, |
|
"rewards/accuracies": 0.824999988079071, |
|
"rewards/chosen": -1.1593824625015259, |
|
"rewards/margins": 0.9408347010612488, |
|
"rewards/rejected": -2.10021710395813, |
|
"step": 255 |
|
}, |
|
{ |
|
"epoch": 0.5189620758483033, |
|
"grad_norm": 25.873872774153636, |
|
"learning_rate": 5.557344661031627e-07, |
|
"logits/chosen": 0.6078227758407593, |
|
"logits/rejected": 0.6234728097915649, |
|
"logps/chosen": -1.1247365474700928, |
|
"logps/rejected": -1.1961596012115479, |
|
"loss": 0.5558, |
|
"rewards/accuracies": 0.7124999761581421, |
|
"rewards/chosen": -1.4092304706573486, |
|
"rewards/margins": 0.7921189665794373, |
|
"rewards/rejected": -2.2013492584228516, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 0.5289421157684631, |
|
"grad_norm": 14.32961070454234, |
|
"learning_rate": 5.383595140634093e-07, |
|
"logits/chosen": 0.634459912776947, |
|
"logits/rejected": 0.6405037641525269, |
|
"logps/chosen": -1.054018259048462, |
|
"logps/rejected": -1.1594079732894897, |
|
"loss": 0.5108, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": -1.3187201023101807, |
|
"rewards/margins": 0.7670294046401978, |
|
"rewards/rejected": -2.085749626159668, |
|
"step": 265 |
|
}, |
|
{ |
|
"epoch": 0.5389221556886228, |
|
"grad_norm": 12.765239103380393, |
|
"learning_rate": 5.209378268645997e-07, |
|
"logits/chosen": 0.6027650237083435, |
|
"logits/rejected": 0.6287568211555481, |
|
"logps/chosen": -1.008739948272705, |
|
"logps/rejected": -1.1450139284133911, |
|
"loss": 0.5318, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -1.2432196140289307, |
|
"rewards/margins": 0.6896325349807739, |
|
"rewards/rejected": -1.932852029800415, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 0.5489021956087824, |
|
"grad_norm": 17.632252940280708, |
|
"learning_rate": 5.034906301489807e-07, |
|
"logits/chosen": 0.6728609204292297, |
|
"logits/rejected": 0.6657983064651489, |
|
"logps/chosen": -1.1084893941879272, |
|
"logps/rejected": -1.182081937789917, |
|
"loss": 0.5413, |
|
"rewards/accuracies": 0.762499988079071, |
|
"rewards/chosen": -1.4515244960784912, |
|
"rewards/margins": 0.8624111413955688, |
|
"rewards/rejected": -2.3139355182647705, |
|
"step": 275 |
|
}, |
|
{ |
|
"epoch": 0.5588822355289421, |
|
"grad_norm": 22.324779245169193, |
|
"learning_rate": 4.860391806382156e-07, |
|
"logits/chosen": 0.6076717972755432, |
|
"logits/rejected": 0.6105688810348511, |
|
"logps/chosen": -1.0900273323059082, |
|
"logps/rejected": -1.1191543340682983, |
|
"loss": 0.512, |
|
"rewards/accuracies": 0.862500011920929, |
|
"rewards/chosen": -1.21976900100708, |
|
"rewards/margins": 0.7310382723808289, |
|
"rewards/rejected": -1.9508073329925537, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 0.5688622754491018, |
|
"grad_norm": 18.31410229389494, |
|
"learning_rate": 4.686047402353433e-07, |
|
"logits/chosen": 0.6535500288009644, |
|
"logits/rejected": 0.6188856959342957, |
|
"logps/chosen": -1.0886776447296143, |
|
"logps/rejected": -1.1758826971054077, |
|
"loss": 0.4871, |
|
"rewards/accuracies": 0.8125, |
|
"rewards/chosen": -1.381490707397461, |
|
"rewards/margins": 0.8018454313278198, |
|
"rewards/rejected": -2.1833362579345703, |
|
"step": 285 |
|
}, |
|
{ |
|
"epoch": 0.5788423153692615, |
|
"grad_norm": 28.66415855277427, |
|
"learning_rate": 4.512085501204253e-07, |
|
"logits/chosen": 0.7206050157546997, |
|
"logits/rejected": 0.7634114623069763, |
|
"logps/chosen": -1.116304636001587, |
|
"logps/rejected": -1.1659752130508423, |
|
"loss": 0.553, |
|
"rewards/accuracies": 0.7250000238418579, |
|
"rewards/chosen": -1.4132583141326904, |
|
"rewards/margins": 0.657631516456604, |
|
"rewards/rejected": -2.070889711380005, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 0.5888223552894212, |
|
"grad_norm": 17.845472804681062, |
|
"learning_rate": 4.338718048714387e-07, |
|
"logits/chosen": 0.6159842610359192, |
|
"logits/rejected": 0.6279784440994263, |
|
"logps/chosen": -1.0876820087432861, |
|
"logps/rejected": -1.1220331192016602, |
|
"loss": 0.5134, |
|
"rewards/accuracies": 0.8374999761581421, |
|
"rewards/chosen": -1.233598232269287, |
|
"rewards/margins": 0.5510263442993164, |
|
"rewards/rejected": -1.784624695777893, |
|
"step": 295 |
|
}, |
|
{ |
|
"epoch": 0.5988023952095808, |
|
"grad_norm": 22.693877555997958, |
|
"learning_rate": 4.166156266419489e-07, |
|
"logits/chosen": 0.6069675087928772, |
|
"logits/rejected": 0.6444044709205627, |
|
"logps/chosen": -1.0315536260604858, |
|
"logps/rejected": -1.1916632652282715, |
|
"loss": 0.5015, |
|
"rewards/accuracies": 0.7749999761581421, |
|
"rewards/chosen": -1.4063842296600342, |
|
"rewards/margins": 0.8028200268745422, |
|
"rewards/rejected": -2.2092041969299316, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.6087824351297405, |
|
"grad_norm": 29.176362707547757, |
|
"learning_rate": 3.9946103942701775e-07, |
|
"logits/chosen": 0.6731375455856323, |
|
"logits/rejected": 0.6558948755264282, |
|
"logps/chosen": -1.0202916860580444, |
|
"logps/rejected": -1.1923191547393799, |
|
"loss": 0.4922, |
|
"rewards/accuracies": 0.7875000238418579, |
|
"rewards/chosen": -1.134216547012329, |
|
"rewards/margins": 0.8526105880737305, |
|
"rewards/rejected": -1.9868271350860596, |
|
"step": 305 |
|
}, |
|
{ |
|
"epoch": 0.6187624750499002, |
|
"grad_norm": 16.68482157863775, |
|
"learning_rate": 3.8242894344870495e-07, |
|
"logits/chosen": 0.6814749836921692, |
|
"logits/rejected": 0.6544811129570007, |
|
"logps/chosen": -1.1450976133346558, |
|
"logps/rejected": -1.2438080310821533, |
|
"loss": 0.518, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": -1.6444076299667358, |
|
"rewards/margins": 0.7449675798416138, |
|
"rewards/rejected": -2.3893752098083496, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 0.6287425149700598, |
|
"grad_norm": 24.235832517472502, |
|
"learning_rate": 3.6554008969236715e-07, |
|
"logits/chosen": 0.6473463773727417, |
|
"logits/rejected": 0.6526838541030884, |
|
"logps/chosen": -1.1155353784561157, |
|
"logps/rejected": -1.2412601709365845, |
|
"loss": 0.5036, |
|
"rewards/accuracies": 0.7749999761581421, |
|
"rewards/chosen": -1.5284216403961182, |
|
"rewards/margins": 0.7738613486289978, |
|
"rewards/rejected": -2.3022828102111816, |
|
"step": 315 |
|
}, |
|
{ |
|
"epoch": 0.6387225548902196, |
|
"grad_norm": 14.637858980431309, |
|
"learning_rate": 3.488150546247778e-07, |
|
"logits/chosen": 0.641592264175415, |
|
"logits/rejected": 0.6488014459609985, |
|
"logps/chosen": -1.0486619472503662, |
|
"logps/rejected": -1.117785930633545, |
|
"loss": 0.5076, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": -1.3681632280349731, |
|
"rewards/margins": 0.7144045829772949, |
|
"rewards/rejected": -2.0825679302215576, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 0.6487025948103793, |
|
"grad_norm": 14.611695255214977, |
|
"learning_rate": 3.3227421512487255e-07, |
|
"logits/chosen": 0.6020836234092712, |
|
"logits/rejected": 0.5903235673904419, |
|
"logps/chosen": -1.0694186687469482, |
|
"logps/rejected": -1.1450960636138916, |
|
"loss": 0.4988, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -1.495102047920227, |
|
"rewards/margins": 0.6063732504844666, |
|
"rewards/rejected": -2.101475238800049, |
|
"step": 325 |
|
}, |
|
{ |
|
"epoch": 0.6586826347305389, |
|
"grad_norm": 53.83501877641933, |
|
"learning_rate": 3.15937723657661e-07, |
|
"logits/chosen": 0.5937372446060181, |
|
"logits/rejected": 0.5983537435531616, |
|
"logps/chosen": -1.0911985635757446, |
|
"logps/rejected": -1.1970782279968262, |
|
"loss": 0.5292, |
|
"rewards/accuracies": 0.7875000238418579, |
|
"rewards/chosen": -1.5829055309295654, |
|
"rewards/margins": 0.6139482259750366, |
|
"rewards/rejected": -2.1968536376953125, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 0.6686626746506986, |
|
"grad_norm": 22.263382020354737, |
|
"learning_rate": 2.9982548372155256e-07, |
|
"logits/chosen": 0.5615344047546387, |
|
"logits/rejected": 0.5818489789962769, |
|
"logps/chosen": -1.0336883068084717, |
|
"logps/rejected": -1.1681592464447021, |
|
"loss": 0.5036, |
|
"rewards/accuracies": 0.824999988079071, |
|
"rewards/chosen": -1.2369213104248047, |
|
"rewards/margins": 1.0152759552001953, |
|
"rewards/rejected": -2.252197265625, |
|
"step": 335 |
|
}, |
|
{ |
|
"epoch": 0.6786427145708582, |
|
"grad_norm": 28.648845585620847, |
|
"learning_rate": 2.8395712559900874e-07, |
|
"logits/chosen": 0.5865073204040527, |
|
"logits/rejected": 0.5887445211410522, |
|
"logps/chosen": -1.2165284156799316, |
|
"logps/rejected": -1.285627007484436, |
|
"loss": 0.5287, |
|
"rewards/accuracies": 0.7250000238418579, |
|
"rewards/chosen": -1.876123070716858, |
|
"rewards/margins": 0.8445480465888977, |
|
"rewards/rejected": -2.7206711769104004, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 0.688622754491018, |
|
"grad_norm": 27.7967265314739, |
|
"learning_rate": 2.683519824400692e-07, |
|
"logits/chosen": 0.6274746656417847, |
|
"logits/rejected": 0.6499799489974976, |
|
"logps/chosen": -1.1229360103607178, |
|
"logps/rejected": -1.2630244493484497, |
|
"loss": 0.5228, |
|
"rewards/accuracies": 0.7250000238418579, |
|
"rewards/chosen": -1.6735212802886963, |
|
"rewards/margins": 0.8694586753845215, |
|
"rewards/rejected": -2.5429797172546387, |
|
"step": 345 |
|
}, |
|
{ |
|
"epoch": 0.6986027944111777, |
|
"grad_norm": 22.50466227883414, |
|
"learning_rate": 2.530290667078846e-07, |
|
"logits/chosen": 0.620844841003418, |
|
"logits/rejected": 0.6195460557937622, |
|
"logps/chosen": -1.1165497303009033, |
|
"logps/rejected": -1.1850183010101318, |
|
"loss": 0.5019, |
|
"rewards/accuracies": 0.824999988079071, |
|
"rewards/chosen": -1.5181663036346436, |
|
"rewards/margins": 0.7825000286102295, |
|
"rewards/rejected": -2.300666332244873, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 0.7085828343313373, |
|
"grad_norm": 21.26239260918309, |
|
"learning_rate": 2.380070470149605e-07, |
|
"logits/chosen": 0.6597515940666199, |
|
"logits/rejected": 0.6680246591567993, |
|
"logps/chosen": -1.1044973134994507, |
|
"logps/rejected": -1.2354698181152344, |
|
"loss": 0.4914, |
|
"rewards/accuracies": 0.7749999761581421, |
|
"rewards/chosen": -1.4524997472763062, |
|
"rewards/margins": 1.0394423007965088, |
|
"rewards/rejected": -2.4919424057006836, |
|
"step": 355 |
|
}, |
|
{ |
|
"epoch": 0.718562874251497, |
|
"grad_norm": 30.986440285036473, |
|
"learning_rate": 2.23304225378328e-07, |
|
"logits/chosen": 0.5663326978683472, |
|
"logits/rejected": 0.5595775842666626, |
|
"logps/chosen": -1.2192121744155884, |
|
"logps/rejected": -1.2986266613006592, |
|
"loss": 0.4991, |
|
"rewards/accuracies": 0.7875000238418579, |
|
"rewards/chosen": -1.6858431100845337, |
|
"rewards/margins": 1.0494027137756348, |
|
"rewards/rejected": -2.735245704650879, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 0.7285429141716567, |
|
"grad_norm": 31.154865644231116, |
|
"learning_rate": 2.0893851492135532e-07, |
|
"logits/chosen": 0.5782791376113892, |
|
"logits/rejected": 0.5733534097671509, |
|
"logps/chosen": -1.108449935913086, |
|
"logps/rejected": -1.2320600748062134, |
|
"loss": 0.4969, |
|
"rewards/accuracies": 0.7749999761581421, |
|
"rewards/chosen": -1.4567809104919434, |
|
"rewards/margins": 1.0569483041763306, |
|
"rewards/rejected": -2.5137288570404053, |
|
"step": 365 |
|
}, |
|
{ |
|
"epoch": 0.7385229540918163, |
|
"grad_norm": 25.665361579684205, |
|
"learning_rate": 1.9492741804936618e-07, |
|
"logits/chosen": 0.5590307116508484, |
|
"logits/rejected": 0.5289372205734253, |
|
"logps/chosen": -1.135460615158081, |
|
"logps/rejected": -1.1982572078704834, |
|
"loss": 0.5123, |
|
"rewards/accuracies": 0.7749999761581421, |
|
"rewards/chosen": -1.5751354694366455, |
|
"rewards/margins": 0.7708719968795776, |
|
"rewards/rejected": -2.346007823944092, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 0.7485029940119761, |
|
"grad_norm": 13.466456049238714, |
|
"learning_rate": 1.812880051256551e-07, |
|
"logits/chosen": 0.5437551736831665, |
|
"logits/rejected": 0.540005087852478, |
|
"logps/chosen": -1.1057159900665283, |
|
"logps/rejected": -1.1711170673370361, |
|
"loss": 0.4956, |
|
"rewards/accuracies": 0.6875, |
|
"rewards/chosen": -1.607832670211792, |
|
"rewards/margins": 0.5974467396736145, |
|
"rewards/rejected": -2.205279588699341, |
|
"step": 375 |
|
}, |
|
{ |
|
"epoch": 0.7584830339321357, |
|
"grad_norm": 22.830693166932168, |
|
"learning_rate": 1.6803689367387918e-07, |
|
"logits/chosen": 0.5521084666252136, |
|
"logits/rejected": 0.563881516456604, |
|
"logps/chosen": -1.0188974142074585, |
|
"logps/rejected": -1.0934759378433228, |
|
"loss": 0.5364, |
|
"rewards/accuracies": 0.6625000238418579, |
|
"rewards/chosen": -1.6915432214736938, |
|
"rewards/margins": 0.41655927896499634, |
|
"rewards/rejected": -2.108102560043335, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 0.7684630738522954, |
|
"grad_norm": 16.717527209109356, |
|
"learning_rate": 1.551902281321651e-07, |
|
"logits/chosen": 0.6741904020309448, |
|
"logits/rejected": 0.6558393836021423, |
|
"logps/chosen": -1.153136134147644, |
|
"logps/rejected": -1.2626793384552002, |
|
"loss": 0.4885, |
|
"rewards/accuracies": 0.8374999761581421, |
|
"rewards/chosen": -1.4042468070983887, |
|
"rewards/margins": 1.1682214736938477, |
|
"rewards/rejected": -2.5724682807922363, |
|
"step": 385 |
|
}, |
|
{ |
|
"epoch": 0.7784431137724551, |
|
"grad_norm": 21.35977175588003, |
|
"learning_rate": 1.4276366018359842e-07, |
|
"logits/chosen": 0.5667208433151245, |
|
"logits/rejected": 0.5196897387504578, |
|
"logps/chosen": -1.1266697645187378, |
|
"logps/rejected": -1.2171615362167358, |
|
"loss": 0.5164, |
|
"rewards/accuracies": 0.7875000238418579, |
|
"rewards/chosen": -1.5344607830047607, |
|
"rewards/margins": 0.9302822947502136, |
|
"rewards/rejected": -2.464743137359619, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 0.7884231536926147, |
|
"grad_norm": 21.011004494402407, |
|
"learning_rate": 1.3077232968705805e-07, |
|
"logits/chosen": 0.581541121006012, |
|
"logits/rejected": 0.5735315084457397, |
|
"logps/chosen": -1.1142101287841797, |
|
"logps/rejected": -1.204474687576294, |
|
"loss": 0.4938, |
|
"rewards/accuracies": 0.8125, |
|
"rewards/chosen": -1.4655622243881226, |
|
"rewards/margins": 0.7967506647109985, |
|
"rewards/rejected": -2.262312650680542, |
|
"step": 395 |
|
}, |
|
{ |
|
"epoch": 0.7984031936127745, |
|
"grad_norm": 21.73473549415687, |
|
"learning_rate": 1.192308462316317e-07, |
|
"logits/chosen": 0.49174603819847107, |
|
"logits/rejected": 0.5455900430679321, |
|
"logps/chosen": -1.1133276224136353, |
|
"logps/rejected": -1.2326544523239136, |
|
"loss": 0.5169, |
|
"rewards/accuracies": 0.862500011920929, |
|
"rewards/chosen": -1.5614674091339111, |
|
"rewards/margins": 0.7484425902366638, |
|
"rewards/rejected": -2.309910297393799, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.8083832335329342, |
|
"grad_norm": 18.213898305037567, |
|
"learning_rate": 1.0815327133708013e-07, |
|
"logits/chosen": 0.5780737400054932, |
|
"logits/rejected": 0.6099402904510498, |
|
"logps/chosen": -1.0353389978408813, |
|
"logps/rejected": -1.1462222337722778, |
|
"loss": 0.5117, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": -1.678412675857544, |
|
"rewards/margins": 0.7521916031837463, |
|
"rewards/rejected": -2.4306042194366455, |
|
"step": 405 |
|
}, |
|
{ |
|
"epoch": 0.8183632734530938, |
|
"grad_norm": 14.585406444054382, |
|
"learning_rate": 9.755310132204297e-08, |
|
"logits/chosen": 0.5654971599578857, |
|
"logits/rejected": 0.5592461824417114, |
|
"logps/chosen": -1.0622889995574951, |
|
"logps/rejected": -1.2071908712387085, |
|
"loss": 0.5324, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -1.5269814729690552, |
|
"rewards/margins": 1.1922402381896973, |
|
"rewards/rejected": -2.719221591949463, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 0.8283433133732535, |
|
"grad_norm": 23.842758561266724, |
|
"learning_rate": 8.744325086085247e-08, |
|
"logits/chosen": 0.5762808322906494, |
|
"logits/rejected": 0.5668230652809143, |
|
"logps/chosen": -1.1735143661499023, |
|
"logps/rejected": -1.2483824491500854, |
|
"loss": 0.504, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": -1.7859411239624023, |
|
"rewards/margins": 0.8882120847702026, |
|
"rewards/rejected": -2.6741533279418945, |
|
"step": 415 |
|
}, |
|
{ |
|
"epoch": 0.8383233532934131, |
|
"grad_norm": 41.62435891120011, |
|
"learning_rate": 7.783603724899257e-08, |
|
"logits/chosen": 0.5172094106674194, |
|
"logits/rejected": 0.5516868829727173, |
|
"logps/chosen": -1.145578145980835, |
|
"logps/rejected": -1.248124599456787, |
|
"loss": 0.5313, |
|
"rewards/accuracies": 0.737500011920929, |
|
"rewards/chosen": -1.742923378944397, |
|
"rewards/margins": 0.8347417712211609, |
|
"rewards/rejected": -2.577665090560913, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 0.8483033932135728, |
|
"grad_norm": 26.49206528554136, |
|
"learning_rate": 6.874316539637126e-08, |
|
"logits/chosen": 0.5917887091636658, |
|
"logits/rejected": 0.6128644943237305, |
|
"logps/chosen": -1.0141854286193848, |
|
"logps/rejected": -1.1528804302215576, |
|
"loss": 0.4854, |
|
"rewards/accuracies": 0.824999988079071, |
|
"rewards/chosen": -1.2433063983917236, |
|
"rewards/margins": 0.826849639415741, |
|
"rewards/rejected": -2.0701558589935303, |
|
"step": 425 |
|
}, |
|
{ |
|
"epoch": 0.8582834331337326, |
|
"grad_norm": 14.052261790037898, |
|
"learning_rate": 6.017571356669182e-08, |
|
"logits/chosen": 0.5848813652992249, |
|
"logits/rejected": 0.5729445219039917, |
|
"logps/chosen": -1.1584407091140747, |
|
"logps/rejected": -1.304329752922058, |
|
"loss": 0.5097, |
|
"rewards/accuracies": 0.7749999761581421, |
|
"rewards/chosen": -1.7920862436294556, |
|
"rewards/margins": 0.670229971408844, |
|
"rewards/rejected": -2.4623162746429443, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 0.8682634730538922, |
|
"grad_norm": 18.538952507210343, |
|
"learning_rate": 5.2144119880293544e-08, |
|
"logits/chosen": 0.5895706415176392, |
|
"logits/rejected": 0.5678045749664307, |
|
"logps/chosen": -1.075687050819397, |
|
"logps/rejected": -1.2755447626113892, |
|
"loss": 0.5187, |
|
"rewards/accuracies": 0.824999988079071, |
|
"rewards/chosen": -1.3742207288742065, |
|
"rewards/margins": 1.1585171222686768, |
|
"rewards/rejected": -2.532737970352173, |
|
"step": 435 |
|
}, |
|
{ |
|
"epoch": 0.8782435129740519, |
|
"grad_norm": 17.852951128940067, |
|
"learning_rate": 4.465816959691149e-08, |
|
"logits/chosen": 0.5652838945388794, |
|
"logits/rejected": 0.6052907705307007, |
|
"logps/chosen": -1.2037353515625, |
|
"logps/rejected": -1.3117984533309937, |
|
"loss": 0.5009, |
|
"rewards/accuracies": 0.887499988079071, |
|
"rewards/chosen": -1.7123981714248657, |
|
"rewards/margins": 1.019058108329773, |
|
"rewards/rejected": -2.7314560413360596, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 0.8882235528942116, |
|
"grad_norm": 14.249216111089481, |
|
"learning_rate": 3.7726983193843485e-08, |
|
"logits/chosen": 0.517819344997406, |
|
"logits/rejected": 0.5205559730529785, |
|
"logps/chosen": -1.0231516361236572, |
|
"logps/rejected": -1.148457646369934, |
|
"loss": 0.5199, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": -1.4192359447479248, |
|
"rewards/margins": 0.889240562915802, |
|
"rewards/rejected": -2.308476448059082, |
|
"step": 445 |
|
}, |
|
{ |
|
"epoch": 0.8982035928143712, |
|
"grad_norm": 17.926055468482602, |
|
"learning_rate": 3.135900525405427e-08, |
|
"logits/chosen": 0.5483254194259644, |
|
"logits/rejected": 0.5569725036621094, |
|
"logps/chosen": -1.160649299621582, |
|
"logps/rejected": -1.2841194868087769, |
|
"loss": 0.5171, |
|
"rewards/accuracies": 0.762499988079071, |
|
"rewards/chosen": -1.7122529745101929, |
|
"rewards/margins": 0.9245160222053528, |
|
"rewards/rejected": -2.6367690563201904, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 0.908183632734531, |
|
"grad_norm": 15.183540302368447, |
|
"learning_rate": 2.5561994177751732e-08, |
|
"logits/chosen": 0.6299312114715576, |
|
"logits/rejected": 0.6476989984512329, |
|
"logps/chosen": -1.0939618349075317, |
|
"logps/rejected": -1.187743902206421, |
|
"loss": 0.4643, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -1.5630865097045898, |
|
"rewards/margins": 0.8125492334365845, |
|
"rewards/rejected": -2.3756356239318848, |
|
"step": 455 |
|
}, |
|
{ |
|
"epoch": 0.9181636726546906, |
|
"grad_norm": 15.769731536967763, |
|
"learning_rate": 2.0343012729971243e-08, |
|
"logits/chosen": 0.5560911893844604, |
|
"logits/rejected": 0.563130259513855, |
|
"logps/chosen": -1.1199856996536255, |
|
"logps/rejected": -1.2025185823440552, |
|
"loss": 0.4948, |
|
"rewards/accuracies": 0.7250000238418579, |
|
"rewards/chosen": -1.6438310146331787, |
|
"rewards/margins": 0.8590097427368164, |
|
"rewards/rejected": -2.502840518951416, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 0.9281437125748503, |
|
"grad_norm": 21.7191545301948, |
|
"learning_rate": 1.570841943568446e-08, |
|
"logits/chosen": 0.6068538427352905, |
|
"logits/rejected": 0.6409121751785278, |
|
"logps/chosen": -1.211014747619629, |
|
"logps/rejected": -1.3489148616790771, |
|
"loss": 0.4823, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": -1.6730806827545166, |
|
"rewards/margins": 1.099481463432312, |
|
"rewards/rejected": -2.772562026977539, |
|
"step": 465 |
|
}, |
|
{ |
|
"epoch": 0.93812375249501, |
|
"grad_norm": 23.87949640105234, |
|
"learning_rate": 1.166386083291604e-08, |
|
"logits/chosen": 0.5642676949501038, |
|
"logits/rejected": 0.5937134027481079, |
|
"logps/chosen": -1.0865347385406494, |
|
"logps/rejected": -1.2261168956756592, |
|
"loss": 0.5012, |
|
"rewards/accuracies": 0.7875000238418579, |
|
"rewards/chosen": -1.5355206727981567, |
|
"rewards/margins": 0.9762239456176758, |
|
"rewards/rejected": -2.511744260787964, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 0.9481037924151696, |
|
"grad_norm": 11.821044673852356, |
|
"learning_rate": 8.214264593307096e-09, |
|
"logits/chosen": 0.5303646922111511, |
|
"logits/rejected": 0.550706684589386, |
|
"logps/chosen": -1.212477207183838, |
|
"logps/rejected": -1.27964186668396, |
|
"loss": 0.4881, |
|
"rewards/accuracies": 0.762499988079071, |
|
"rewards/chosen": -1.8850059509277344, |
|
"rewards/margins": 0.7182316780090332, |
|
"rewards/rejected": -2.603238105773926, |
|
"step": 475 |
|
}, |
|
{ |
|
"epoch": 0.9580838323353293, |
|
"grad_norm": 18.686466292842862, |
|
"learning_rate": 5.3638335185058335e-09, |
|
"logits/chosen": 0.5736523866653442, |
|
"logits/rejected": 0.625065267086029, |
|
"logps/chosen": -1.2084397077560425, |
|
"logps/rejected": -1.3002406358718872, |
|
"loss": 0.5266, |
|
"rewards/accuracies": 0.8125, |
|
"rewards/chosen": -1.7023632526397705, |
|
"rewards/margins": 0.9809869527816772, |
|
"rewards/rejected": -2.6833503246307373, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 0.9680638722554891, |
|
"grad_norm": 26.284460492309865, |
|
"learning_rate": 3.116040419701815e-09, |
|
"logits/chosen": 0.5889401435852051, |
|
"logits/rejected": 0.5715131759643555, |
|
"logps/chosen": -1.1137585639953613, |
|
"logps/rejected": -1.2124450206756592, |
|
"loss": 0.5171, |
|
"rewards/accuracies": 0.7875000238418579, |
|
"rewards/chosen": -1.5402872562408447, |
|
"rewards/margins": 0.7413524389266968, |
|
"rewards/rejected": -2.281639575958252, |
|
"step": 485 |
|
}, |
|
{ |
|
"epoch": 0.9780439121756487, |
|
"grad_norm": 30.03419204450396, |
|
"learning_rate": 1.4736238865398765e-09, |
|
"logits/chosen": 0.6326584815979004, |
|
"logits/rejected": 0.6649195551872253, |
|
"logps/chosen": -1.1648736000061035, |
|
"logps/rejected": -1.2586228847503662, |
|
"loss": 0.519, |
|
"rewards/accuracies": 0.762499988079071, |
|
"rewards/chosen": -1.403749942779541, |
|
"rewards/margins": 1.0453355312347412, |
|
"rewards/rejected": -2.4490854740142822, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 0.9880239520958084, |
|
"grad_norm": 33.25313115299262, |
|
"learning_rate": 4.3858495057080836e-10, |
|
"logits/chosen": 0.5985925197601318, |
|
"logits/rejected": 0.5628663301467896, |
|
"logps/chosen": -1.2969096899032593, |
|
"logps/rejected": -1.4230639934539795, |
|
"loss": 0.4764, |
|
"rewards/accuracies": 0.8125, |
|
"rewards/chosen": -1.819708228111267, |
|
"rewards/margins": 1.287508487701416, |
|
"rewards/rejected": -3.1072165966033936, |
|
"step": 495 |
|
}, |
|
{ |
|
"epoch": 0.998003992015968, |
|
"grad_norm": 30.55644687750284, |
|
"learning_rate": 1.2184647302626582e-11, |
|
"logits/chosen": 0.5701574087142944, |
|
"logits/rejected": 0.5943929553031921, |
|
"logps/chosen": -1.1318881511688232, |
|
"logps/rejected": -1.2142232656478882, |
|
"loss": 0.4976, |
|
"rewards/accuracies": 0.762499988079071, |
|
"rewards/chosen": -1.6143200397491455, |
|
"rewards/margins": 0.6937130093574524, |
|
"rewards/rejected": -2.3080332279205322, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"step": 501, |
|
"total_flos": 0.0, |
|
"train_loss": 0.559710203114146, |
|
"train_runtime": 2572.2381, |
|
"train_samples_per_second": 24.931, |
|
"train_steps_per_second": 0.195 |
|
} |
|
], |
|
"logging_steps": 5, |
|
"max_steps": 501, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 1, |
|
"save_steps": 101, |
|
"stateful_callbacks": { |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": true, |
|
"should_training_stop": true |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 0.0, |
|
"train_batch_size": 4, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |