{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.998381162619573,
"eval_steps": 1,
"global_step": 848,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0235467255334805,
"grad_norm": 4.103935241699219,
"learning_rate": 3.060971262053483e-06,
"logits/chosen": -4.40625,
"logits/rejected": -4.4375,
"logps/chosen": -652.0,
"logps/rejected": -592.0,
"loss": 0.6951,
"rewards/accuracies": 0.08124999701976776,
"rewards/chosen": -0.0006256103515625,
"rewards/margins": -0.0089111328125,
"rewards/rejected": 0.00830078125,
"step": 10
},
{
"epoch": 0.047093451066961,
"grad_norm": 3.962085247039795,
"learning_rate": 3.9824154277970135e-06,
"logits/chosen": -4.5,
"logits/rejected": -4.5,
"logps/chosen": -592.0,
"logps/rejected": -520.0,
"loss": 0.6868,
"rewards/accuracies": 0.20000000298023224,
"rewards/chosen": 0.0006256103515625,
"rewards/margins": 0.00830078125,
"rewards/rejected": -0.007659912109375,
"step": 20
},
{
"epoch": 0.0706401766004415,
"grad_norm": 4.004490852355957,
"learning_rate": 4.521425711265269e-06,
"logits/chosen": -4.46875,
"logits/rejected": -4.5,
"logps/chosen": -608.0,
"logps/rejected": -524.0,
"loss": 0.6591,
"rewards/accuracies": 0.3812499940395355,
"rewards/chosen": 0.053955078125,
"rewards/margins": 0.076171875,
"rewards/rejected": -0.0220947265625,
"step": 30
},
{
"epoch": 0.094186902133922,
"grad_norm": 3.424196481704712,
"learning_rate": 4.903859593540544e-06,
"logits/chosen": -4.4375,
"logits/rejected": -4.40625,
"logps/chosen": -584.0,
"logps/rejected": -504.0,
"loss": 0.6295,
"rewards/accuracies": 0.5625,
"rewards/chosen": 0.09521484375,
"rewards/margins": 0.1455078125,
"rewards/rejected": -0.05078125,
"step": 40
},
{
"epoch": 0.1177336276674025,
"grad_norm": 3.229916572570801,
"learning_rate": 4.962732919254658e-06,
"logits/chosen": -4.5625,
"logits/rejected": -4.53125,
"logps/chosen": -548.0,
"logps/rejected": -502.0,
"loss": 0.58,
"rewards/accuracies": 0.6499999761581421,
"rewards/chosen": 0.15234375,
"rewards/margins": 0.28125,
"rewards/rejected": -0.12890625,
"step": 50
},
{
"epoch": 0.141280353200883,
"grad_norm": 3.3798811435699463,
"learning_rate": 4.900621118012423e-06,
"logits/chosen": -4.40625,
"logits/rejected": -4.40625,
"logps/chosen": -580.0,
"logps/rejected": -454.0,
"loss": 0.5537,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": 0.302734375,
"rewards/margins": 0.423828125,
"rewards/rejected": -0.12158203125,
"step": 60
},
{
"epoch": 0.1648270787343635,
"grad_norm": 3.2893781661987305,
"learning_rate": 4.8385093167701866e-06,
"logits/chosen": -4.40625,
"logits/rejected": -4.40625,
"logps/chosen": -608.0,
"logps/rejected": -508.0,
"loss": 0.5351,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": 0.251953125,
"rewards/margins": 0.4609375,
"rewards/rejected": -0.208984375,
"step": 70
},
{
"epoch": 0.188373804267844,
"grad_norm": 3.3645501136779785,
"learning_rate": 4.776397515527951e-06,
"logits/chosen": -4.5,
"logits/rejected": -4.53125,
"logps/chosen": -588.0,
"logps/rejected": -468.0,
"loss": 0.5096,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": 0.34375,
"rewards/margins": 0.6796875,
"rewards/rejected": -0.3359375,
"step": 80
},
{
"epoch": 0.2119205298013245,
"grad_norm": 3.14998197555542,
"learning_rate": 4.714285714285715e-06,
"logits/chosen": -4.40625,
"logits/rejected": -4.4375,
"logps/chosen": -632.0,
"logps/rejected": -490.0,
"loss": 0.4901,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": 0.375,
"rewards/margins": 0.69140625,
"rewards/rejected": -0.318359375,
"step": 90
},
{
"epoch": 0.235467255334805,
"grad_norm": 3.2294790744781494,
"learning_rate": 4.652173913043478e-06,
"logits/chosen": -4.5,
"logits/rejected": -4.5,
"logps/chosen": -580.0,
"logps/rejected": -506.0,
"loss": 0.4736,
"rewards/accuracies": 0.762499988079071,
"rewards/chosen": 0.369140625,
"rewards/margins": 0.6328125,
"rewards/rejected": -0.263671875,
"step": 100
},
{
"epoch": 0.25901398086828553,
"grad_norm": 2.738917827606201,
"learning_rate": 4.590062111801243e-06,
"logits/chosen": -4.5,
"logits/rejected": -4.59375,
"logps/chosen": -604.0,
"logps/rejected": -516.0,
"loss": 0.4703,
"rewards/accuracies": 0.762499988079071,
"rewards/chosen": 0.5,
"rewards/margins": 0.9296875,
"rewards/rejected": -0.431640625,
"step": 110
},
{
"epoch": 0.282560706401766,
"grad_norm": 3.060237169265747,
"learning_rate": 4.527950310559007e-06,
"logits/chosen": -4.40625,
"logits/rejected": -4.40625,
"logps/chosen": -632.0,
"logps/rejected": -552.0,
"loss": 0.4649,
"rewards/accuracies": 0.7437499761581421,
"rewards/chosen": 0.53515625,
"rewards/margins": 0.99609375,
"rewards/rejected": -0.462890625,
"step": 120
},
{
"epoch": 0.30610743193524653,
"grad_norm": 3.196960926055908,
"learning_rate": 4.46583850931677e-06,
"logits/chosen": -4.34375,
"logits/rejected": -4.34375,
"logps/chosen": -644.0,
"logps/rejected": -524.0,
"loss": 0.4824,
"rewards/accuracies": 0.762499988079071,
"rewards/chosen": 0.54296875,
"rewards/margins": 1.0,
"rewards/rejected": -0.45703125,
"step": 130
},
{
"epoch": 0.329654157468727,
"grad_norm": 3.1361074447631836,
"learning_rate": 4.4037267080745344e-06,
"logits/chosen": -4.375,
"logits/rejected": -4.40625,
"logps/chosen": -628.0,
"logps/rejected": -548.0,
"loss": 0.4539,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": 0.447265625,
"rewards/margins": 0.9921875,
"rewards/rejected": -0.546875,
"step": 140
},
{
"epoch": 0.35320088300220753,
"grad_norm": 2.6170785427093506,
"learning_rate": 4.3416149068322985e-06,
"logits/chosen": -4.46875,
"logits/rejected": -4.4375,
"logps/chosen": -580.0,
"logps/rejected": -512.0,
"loss": 0.4478,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": 0.65625,
"rewards/margins": 1.0390625,
"rewards/rejected": -0.3828125,
"step": 150
},
{
"epoch": 0.376747608535688,
"grad_norm": 2.7828519344329834,
"learning_rate": 4.279503105590063e-06,
"logits/chosen": -4.375,
"logits/rejected": -4.34375,
"logps/chosen": -608.0,
"logps/rejected": -506.0,
"loss": 0.4407,
"rewards/accuracies": 0.7875000238418579,
"rewards/chosen": 0.5859375,
"rewards/margins": 1.09375,
"rewards/rejected": -0.5078125,
"step": 160
},
{
"epoch": 0.40029433406916853,
"grad_norm": 2.906342029571533,
"learning_rate": 4.217391304347827e-06,
"logits/chosen": -4.5,
"logits/rejected": -4.4375,
"logps/chosen": -584.0,
"logps/rejected": -520.0,
"loss": 0.4309,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": 0.6796875,
"rewards/margins": 1.2578125,
"rewards/rejected": -0.58203125,
"step": 170
},
{
"epoch": 0.423841059602649,
"grad_norm": 2.827556848526001,
"learning_rate": 4.15527950310559e-06,
"logits/chosen": -4.5,
"logits/rejected": -4.53125,
"logps/chosen": -616.0,
"logps/rejected": -506.0,
"loss": 0.4508,
"rewards/accuracies": 0.78125,
"rewards/chosen": 0.6875,
"rewards/margins": 1.140625,
"rewards/rejected": -0.453125,
"step": 180
},
{
"epoch": 0.44738778513612953,
"grad_norm": 2.7354469299316406,
"learning_rate": 4.093167701863354e-06,
"logits/chosen": -4.4375,
"logits/rejected": -4.5,
"logps/chosen": -616.0,
"logps/rejected": -510.0,
"loss": 0.4341,
"rewards/accuracies": 0.78125,
"rewards/chosen": 0.78515625,
"rewards/margins": 1.3046875,
"rewards/rejected": -0.51953125,
"step": 190
},
{
"epoch": 0.47093451066961,
"grad_norm": 2.674173593521118,
"learning_rate": 4.031055900621118e-06,
"logits/chosen": -4.4375,
"logits/rejected": -4.4375,
"logps/chosen": -600.0,
"logps/rejected": -474.0,
"loss": 0.4224,
"rewards/accuracies": 0.8125,
"rewards/chosen": 0.72265625,
"rewards/margins": 1.140625,
"rewards/rejected": -0.4140625,
"step": 200
},
{
"epoch": 0.49448123620309054,
"grad_norm": 3.0752134323120117,
"learning_rate": 3.968944099378882e-06,
"logits/chosen": -4.4375,
"logits/rejected": -4.46875,
"logps/chosen": -628.0,
"logps/rejected": -584.0,
"loss": 0.4401,
"rewards/accuracies": 0.75,
"rewards/chosen": 0.71875,
"rewards/margins": 1.2109375,
"rewards/rejected": -0.494140625,
"step": 210
},
{
"epoch": 0.5180279617365711,
"grad_norm": 3.02925181388855,
"learning_rate": 3.906832298136646e-06,
"logits/chosen": -4.34375,
"logits/rejected": -4.40625,
"logps/chosen": -644.0,
"logps/rejected": -532.0,
"loss": 0.4338,
"rewards/accuracies": 0.7437499761581421,
"rewards/chosen": 0.71875,
"rewards/margins": 1.0078125,
"rewards/rejected": -0.29296875,
"step": 220
},
{
"epoch": 0.5415746872700515,
"grad_norm": 2.621617078781128,
"learning_rate": 3.8447204968944105e-06,
"logits/chosen": -4.40625,
"logits/rejected": -4.4375,
"logps/chosen": -616.0,
"logps/rejected": -552.0,
"loss": 0.4285,
"rewards/accuracies": 0.7875000238418579,
"rewards/chosen": 0.81640625,
"rewards/margins": 1.1640625,
"rewards/rejected": -0.345703125,
"step": 230
},
{
"epoch": 0.565121412803532,
"grad_norm": 2.96905779838562,
"learning_rate": 3.782608695652174e-06,
"logits/chosen": -4.46875,
"logits/rejected": -4.53125,
"logps/chosen": -560.0,
"logps/rejected": -486.0,
"loss": 0.4185,
"rewards/accuracies": 0.8062499761581421,
"rewards/chosen": 0.84375,
"rewards/margins": 1.40625,
"rewards/rejected": -0.55859375,
"step": 240
},
{
"epoch": 0.5886681383370125,
"grad_norm": 3.1430623531341553,
"learning_rate": 3.7204968944099383e-06,
"logits/chosen": -4.46875,
"logits/rejected": -4.53125,
"logps/chosen": -624.0,
"logps/rejected": -556.0,
"loss": 0.4288,
"rewards/accuracies": 0.768750011920929,
"rewards/chosen": 0.85546875,
"rewards/margins": 1.2890625,
"rewards/rejected": -0.435546875,
"step": 250
},
{
"epoch": 0.6122148638704931,
"grad_norm": 2.8485913276672363,
"learning_rate": 3.6583850931677024e-06,
"logits/chosen": -4.40625,
"logits/rejected": -4.4375,
"logps/chosen": -584.0,
"logps/rejected": -552.0,
"loss": 0.4362,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": 0.90625,
"rewards/margins": 1.3125,
"rewards/rejected": -0.41015625,
"step": 260
},
{
"epoch": 0.6357615894039735,
"grad_norm": 2.8013317584991455,
"learning_rate": 3.596273291925466e-06,
"logits/chosen": -4.4375,
"logits/rejected": -4.4375,
"logps/chosen": -584.0,
"logps/rejected": -492.0,
"loss": 0.4193,
"rewards/accuracies": 0.7562500238418579,
"rewards/chosen": 0.75,
"rewards/margins": 1.25,
"rewards/rejected": -0.498046875,
"step": 270
},
{
"epoch": 0.659308314937454,
"grad_norm": 2.7474491596221924,
"learning_rate": 3.53416149068323e-06,
"logits/chosen": -4.4375,
"logits/rejected": -4.5,
"logps/chosen": -604.0,
"logps/rejected": -472.0,
"loss": 0.4029,
"rewards/accuracies": 0.793749988079071,
"rewards/chosen": 0.8046875,
"rewards/margins": 1.3125,
"rewards/rejected": -0.5078125,
"step": 280
},
{
"epoch": 0.6828550404709345,
"grad_norm": 2.721045732498169,
"learning_rate": 3.472049689440994e-06,
"logits/chosen": -4.40625,
"logits/rejected": -4.46875,
"logps/chosen": -592.0,
"logps/rejected": -510.0,
"loss": 0.4111,
"rewards/accuracies": 0.8062499761581421,
"rewards/chosen": 1.09375,
"rewards/margins": 1.6015625,
"rewards/rejected": -0.51171875,
"step": 290
},
{
"epoch": 0.7064017660044151,
"grad_norm": 2.9466845989227295,
"learning_rate": 3.4099378881987584e-06,
"logits/chosen": -4.40625,
"logits/rejected": -4.40625,
"logps/chosen": -568.0,
"logps/rejected": -468.0,
"loss": 0.4108,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": 1.1015625,
"rewards/margins": 1.578125,
"rewards/rejected": -0.474609375,
"step": 300
},
{
"epoch": 0.7299484915378955,
"grad_norm": 2.3606228828430176,
"learning_rate": 3.347826086956522e-06,
"logits/chosen": -4.40625,
"logits/rejected": -4.40625,
"logps/chosen": -596.0,
"logps/rejected": -488.0,
"loss": 0.4041,
"rewards/accuracies": 0.78125,
"rewards/chosen": 1.078125,
"rewards/margins": 1.4453125,
"rewards/rejected": -0.36328125,
"step": 310
},
{
"epoch": 0.753495217071376,
"grad_norm": 3.1279854774475098,
"learning_rate": 3.285714285714286e-06,
"logits/chosen": -4.5,
"logits/rejected": -4.46875,
"logps/chosen": -564.0,
"logps/rejected": -510.0,
"loss": 0.4247,
"rewards/accuracies": 0.762499988079071,
"rewards/chosen": 0.9609375,
"rewards/margins": 1.2265625,
"rewards/rejected": -0.26171875,
"step": 320
},
{
"epoch": 0.7770419426048565,
"grad_norm": 2.6707963943481445,
"learning_rate": 3.22360248447205e-06,
"logits/chosen": -4.375,
"logits/rejected": -4.4375,
"logps/chosen": -580.0,
"logps/rejected": -492.0,
"loss": 0.3787,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": 1.0390625,
"rewards/margins": 1.4453125,
"rewards/rejected": -0.400390625,
"step": 330
},
{
"epoch": 0.8005886681383371,
"grad_norm": 2.8069331645965576,
"learning_rate": 3.1614906832298135e-06,
"logits/chosen": -4.46875,
"logits/rejected": -4.46875,
"logps/chosen": -632.0,
"logps/rejected": -600.0,
"loss": 0.4048,
"rewards/accuracies": 0.78125,
"rewards/chosen": 1.078125,
"rewards/margins": 1.3125,
"rewards/rejected": -0.228515625,
"step": 340
},
{
"epoch": 0.8241353936718175,
"grad_norm": 2.5299603939056396,
"learning_rate": 3.099378881987578e-06,
"logits/chosen": -4.53125,
"logits/rejected": -4.5,
"logps/chosen": -600.0,
"logps/rejected": -532.0,
"loss": 0.3885,
"rewards/accuracies": 0.78125,
"rewards/chosen": 1.1875,
"rewards/margins": 1.4375,
"rewards/rejected": -0.251953125,
"step": 350
},
{
"epoch": 0.847682119205298,
"grad_norm": 2.7665863037109375,
"learning_rate": 3.037267080745342e-06,
"logits/chosen": -4.46875,
"logits/rejected": -4.4375,
"logps/chosen": -592.0,
"logps/rejected": -472.0,
"loss": 0.3892,
"rewards/accuracies": 0.862500011920929,
"rewards/chosen": 1.3515625,
"rewards/margins": 1.7578125,
"rewards/rejected": -0.404296875,
"step": 360
},
{
"epoch": 0.8712288447387785,
"grad_norm": 2.8109049797058105,
"learning_rate": 2.975155279503106e-06,
"logits/chosen": -4.46875,
"logits/rejected": -4.4375,
"logps/chosen": -580.0,
"logps/rejected": -524.0,
"loss": 0.4115,
"rewards/accuracies": 0.8062499761581421,
"rewards/chosen": 1.171875,
"rewards/margins": 1.5234375,
"rewards/rejected": -0.349609375,
"step": 370
},
{
"epoch": 0.8947755702722591,
"grad_norm": 2.657787799835205,
"learning_rate": 2.9130434782608695e-06,
"logits/chosen": -4.40625,
"logits/rejected": -4.46875,
"logps/chosen": -568.0,
"logps/rejected": -532.0,
"loss": 0.3831,
"rewards/accuracies": 0.8374999761581421,
"rewards/chosen": 1.1953125,
"rewards/margins": 1.578125,
"rewards/rejected": -0.38671875,
"step": 380
},
{
"epoch": 0.9183222958057395,
"grad_norm": 2.7588131427764893,
"learning_rate": 2.850931677018634e-06,
"logits/chosen": -4.46875,
"logits/rejected": -4.46875,
"logps/chosen": -588.0,
"logps/rejected": -544.0,
"loss": 0.3865,
"rewards/accuracies": 0.8125,
"rewards/chosen": 1.234375,
"rewards/margins": 1.4453125,
"rewards/rejected": -0.216796875,
"step": 390
},
{
"epoch": 0.94186902133922,
"grad_norm": 3.0615074634552,
"learning_rate": 2.788819875776398e-06,
"logits/chosen": -4.4375,
"logits/rejected": -4.5,
"logps/chosen": -576.0,
"logps/rejected": -502.0,
"loss": 0.3967,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": 1.1875,
"rewards/margins": 1.546875,
"rewards/rejected": -0.359375,
"step": 400
},
{
"epoch": 0.9654157468727005,
"grad_norm": 3.119372844696045,
"learning_rate": 2.7267080745341618e-06,
"logits/chosen": -4.53125,
"logits/rejected": -4.5,
"logps/chosen": -604.0,
"logps/rejected": -576.0,
"loss": 0.4029,
"rewards/accuracies": 0.8125,
"rewards/chosen": 1.28125,
"rewards/margins": 1.7265625,
"rewards/rejected": -0.451171875,
"step": 410
},
{
"epoch": 0.9889624724061811,
"grad_norm": 3.135815382003784,
"learning_rate": 2.6645962732919255e-06,
"logits/chosen": -4.375,
"logits/rejected": -4.4375,
"logps/chosen": -628.0,
"logps/rejected": -544.0,
"loss": 0.3687,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": 1.46875,
"rewards/margins": 1.671875,
"rewards/rejected": -0.1982421875,
"step": 420
},
{
"epoch": 1.0141280353200883,
"grad_norm": 1.4070848226547241,
"learning_rate": 2.6024844720496896e-06,
"logits/chosen": -4.40625,
"logits/rejected": -4.4375,
"logps/chosen": -596.0,
"logps/rejected": -544.0,
"loss": 0.3031,
"rewards/accuracies": 0.8695651888847351,
"rewards/chosen": 1.4140625,
"rewards/margins": 1.8203125,
"rewards/rejected": -0.40234375,
"step": 430
},
{
"epoch": 1.0376747608535688,
"grad_norm": 1.7027989625930786,
"learning_rate": 2.5403726708074537e-06,
"logits/chosen": -4.375,
"logits/rejected": -4.40625,
"logps/chosen": -584.0,
"logps/rejected": -502.0,
"loss": 0.2702,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": 1.578125,
"rewards/margins": 2.21875,
"rewards/rejected": -0.64453125,
"step": 440
},
{
"epoch": 1.0612214863870493,
"grad_norm": 1.9917528629302979,
"learning_rate": 2.4782608695652178e-06,
"logits/chosen": -4.4375,
"logits/rejected": -4.40625,
"logps/chosen": -584.0,
"logps/rejected": -508.0,
"loss": 0.2617,
"rewards/accuracies": 0.8812500238418579,
"rewards/chosen": 1.53125,
"rewards/margins": 2.046875,
"rewards/rejected": -0.51953125,
"step": 450
},
{
"epoch": 1.0847682119205297,
"grad_norm": 1.8857314586639404,
"learning_rate": 2.4161490683229814e-06,
"logits/chosen": -4.40625,
"logits/rejected": -4.46875,
"logps/chosen": -580.0,
"logps/rejected": -524.0,
"loss": 0.2539,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 1.703125,
"rewards/margins": 2.171875,
"rewards/rejected": -0.462890625,
"step": 460
},
{
"epoch": 1.1083149374540102,
"grad_norm": 1.778446912765503,
"learning_rate": 2.3540372670807455e-06,
"logits/chosen": -4.4375,
"logits/rejected": -4.53125,
"logps/chosen": -588.0,
"logps/rejected": -472.0,
"loss": 0.2617,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.5703125,
"rewards/margins": 2.03125,
"rewards/rejected": -0.46484375,
"step": 470
},
{
"epoch": 1.131861662987491,
"grad_norm": 1.5340267419815063,
"learning_rate": 2.2919254658385096e-06,
"logits/chosen": -4.34375,
"logits/rejected": -4.40625,
"logps/chosen": -644.0,
"logps/rejected": -608.0,
"loss": 0.2414,
"rewards/accuracies": 0.9375,
"rewards/chosen": 1.71875,
"rewards/margins": 2.40625,
"rewards/rejected": -0.6875,
"step": 480
},
{
"epoch": 1.1554083885209714,
"grad_norm": 1.7361700534820557,
"learning_rate": 2.2298136645962733e-06,
"logits/chosen": -4.375,
"logits/rejected": -4.34375,
"logps/chosen": -612.0,
"logps/rejected": -564.0,
"loss": 0.2463,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": 1.5859375,
"rewards/margins": 2.234375,
"rewards/rejected": -0.65625,
"step": 490
},
{
"epoch": 1.1789551140544519,
"grad_norm": 1.7070685625076294,
"learning_rate": 2.1677018633540374e-06,
"logits/chosen": -4.34375,
"logits/rejected": -4.3125,
"logps/chosen": -568.0,
"logps/rejected": -528.0,
"loss": 0.2385,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": 1.6015625,
"rewards/margins": 2.09375,
"rewards/rejected": -0.5,
"step": 500
},
{
"epoch": 1.2025018395879323,
"grad_norm": 2.132335901260376,
"learning_rate": 2.1055900621118015e-06,
"logits/chosen": -4.4375,
"logits/rejected": -4.40625,
"logps/chosen": -636.0,
"logps/rejected": -548.0,
"loss": 0.2412,
"rewards/accuracies": 0.90625,
"rewards/chosen": 1.734375,
"rewards/margins": 2.203125,
"rewards/rejected": -0.46484375,
"step": 510
},
{
"epoch": 1.2260485651214128,
"grad_norm": 2.0063838958740234,
"learning_rate": 2.0434782608695656e-06,
"logits/chosen": -4.46875,
"logits/rejected": -4.40625,
"logps/chosen": -576.0,
"logps/rejected": -552.0,
"loss": 0.2437,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": 1.484375,
"rewards/margins": 2.171875,
"rewards/rejected": -0.69140625,
"step": 520
},
{
"epoch": 1.2495952906548933,
"grad_norm": 1.715623378753662,
"learning_rate": 1.9813664596273293e-06,
"logits/chosen": -4.375,
"logits/rejected": -4.4375,
"logps/chosen": -620.0,
"logps/rejected": -556.0,
"loss": 0.2371,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 1.6328125,
"rewards/margins": 2.09375,
"rewards/rejected": -0.466796875,
"step": 530
},
{
"epoch": 1.2731420161883737,
"grad_norm": 1.8319090604782104,
"learning_rate": 1.9192546583850934e-06,
"logits/chosen": -4.375,
"logits/rejected": -4.34375,
"logps/chosen": -564.0,
"logps/rejected": -470.0,
"loss": 0.2456,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": 1.6015625,
"rewards/margins": 2.3125,
"rewards/rejected": -0.70703125,
"step": 540
},
{
"epoch": 1.2966887417218542,
"grad_norm": 1.757511854171753,
"learning_rate": 1.8571428571428573e-06,
"logits/chosen": -4.4375,
"logits/rejected": -4.46875,
"logps/chosen": -604.0,
"logps/rejected": -528.0,
"loss": 0.2382,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 1.7265625,
"rewards/margins": 2.21875,
"rewards/rejected": -0.494140625,
"step": 550
},
{
"epoch": 1.3202354672553347,
"grad_norm": 1.6837247610092163,
"learning_rate": 1.7950310559006212e-06,
"logits/chosen": -4.40625,
"logits/rejected": -4.40625,
"logps/chosen": -616.0,
"logps/rejected": -528.0,
"loss": 0.2339,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 1.8125,
"rewards/margins": 2.203125,
"rewards/rejected": -0.390625,
"step": 560
},
{
"epoch": 1.3437821927888154,
"grad_norm": 1.5681090354919434,
"learning_rate": 1.7329192546583853e-06,
"logits/chosen": -4.4375,
"logits/rejected": -4.4375,
"logps/chosen": -572.0,
"logps/rejected": -496.0,
"loss": 0.2341,
"rewards/accuracies": 0.9375,
"rewards/chosen": 1.7890625,
"rewards/margins": 2.1875,
"rewards/rejected": -0.40625,
"step": 570
},
{
"epoch": 1.3673289183222959,
"grad_norm": 1.8752034902572632,
"learning_rate": 1.6708074534161492e-06,
"logits/chosen": -4.5,
"logits/rejected": -4.5,
"logps/chosen": -532.0,
"logps/rejected": -512.0,
"loss": 0.2257,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.671875,
"rewards/margins": 2.390625,
"rewards/rejected": -0.71875,
"step": 580
},
{
"epoch": 1.3908756438557763,
"grad_norm": 1.8631969690322876,
"learning_rate": 1.608695652173913e-06,
"logits/chosen": -4.40625,
"logits/rejected": -4.40625,
"logps/chosen": -596.0,
"logps/rejected": -488.0,
"loss": 0.2405,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 1.671875,
"rewards/margins": 2.28125,
"rewards/rejected": -0.61328125,
"step": 590
},
{
"epoch": 1.4144223693892568,
"grad_norm": 1.8408665657043457,
"learning_rate": 1.5465838509316772e-06,
"logits/chosen": -4.4375,
"logits/rejected": -4.40625,
"logps/chosen": -568.0,
"logps/rejected": -508.0,
"loss": 0.2346,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.6796875,
"rewards/margins": 2.34375,
"rewards/rejected": -0.6640625,
"step": 600
},
{
"epoch": 1.4379690949227373,
"grad_norm": 1.8455615043640137,
"learning_rate": 1.484472049689441e-06,
"logits/chosen": -4.5,
"logits/rejected": -4.5,
"logps/chosen": -632.0,
"logps/rejected": -548.0,
"loss": 0.2415,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 1.8359375,
"rewards/margins": 2.25,
"rewards/rejected": -0.41015625,
"step": 610
},
{
"epoch": 1.4615158204562178,
"grad_norm": 1.7498400211334229,
"learning_rate": 1.4223602484472052e-06,
"logits/chosen": -4.40625,
"logits/rejected": -4.4375,
"logps/chosen": -588.0,
"logps/rejected": -496.0,
"loss": 0.2428,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 1.8125,
"rewards/margins": 2.328125,
"rewards/rejected": -0.515625,
"step": 620
},
{
"epoch": 1.4850625459896984,
"grad_norm": 1.8195767402648926,
"learning_rate": 1.360248447204969e-06,
"logits/chosen": -4.40625,
"logits/rejected": -4.4375,
"logps/chosen": -616.0,
"logps/rejected": -520.0,
"loss": 0.2408,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 1.8125,
"rewards/margins": 2.40625,
"rewards/rejected": -0.59375,
"step": 630
},
{
"epoch": 1.508609271523179,
"grad_norm": 1.84929621219635,
"learning_rate": 1.2981366459627332e-06,
"logits/chosen": -4.375,
"logits/rejected": -4.40625,
"logps/chosen": -596.0,
"logps/rejected": -492.0,
"loss": 0.2462,
"rewards/accuracies": 0.893750011920929,
"rewards/chosen": 1.5859375,
"rewards/margins": 2.328125,
"rewards/rejected": -0.73046875,
"step": 640
},
{
"epoch": 1.5321559970566594,
"grad_norm": 1.943724513053894,
"learning_rate": 1.236024844720497e-06,
"logits/chosen": -4.5,
"logits/rejected": -4.5,
"logps/chosen": -624.0,
"logps/rejected": -556.0,
"loss": 0.2502,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": 1.734375,
"rewards/margins": 2.296875,
"rewards/rejected": -0.56640625,
"step": 650
},
{
"epoch": 1.5557027225901399,
"grad_norm": 1.7159101963043213,
"learning_rate": 1.173913043478261e-06,
"logits/chosen": -4.40625,
"logits/rejected": -4.4375,
"logps/chosen": -536.0,
"logps/rejected": -494.0,
"loss": 0.2423,
"rewards/accuracies": 0.90625,
"rewards/chosen": 1.71875,
"rewards/margins": 2.34375,
"rewards/rejected": -0.62109375,
"step": 660
},
{
"epoch": 1.5792494481236203,
"grad_norm": 1.5926669836044312,
"learning_rate": 1.111801242236025e-06,
"logits/chosen": -4.4375,
"logits/rejected": -4.46875,
"logps/chosen": -568.0,
"logps/rejected": -502.0,
"loss": 0.234,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 1.515625,
"rewards/margins": 2.03125,
"rewards/rejected": -0.515625,
"step": 670
},
{
"epoch": 1.6027961736571008,
"grad_norm": 1.9127466678619385,
"learning_rate": 1.049689440993789e-06,
"logits/chosen": -4.46875,
"logits/rejected": -4.5,
"logps/chosen": -612.0,
"logps/rejected": -508.0,
"loss": 0.2337,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": 1.9375,
"rewards/margins": 2.375,
"rewards/rejected": -0.4296875,
"step": 680
},
{
"epoch": 1.6263428991905813,
"grad_norm": 1.569267749786377,
"learning_rate": 9.875776397515528e-07,
"logits/chosen": -4.4375,
"logits/rejected": -4.4375,
"logps/chosen": -596.0,
"logps/rejected": -548.0,
"loss": 0.2294,
"rewards/accuracies": 0.90625,
"rewards/chosen": 1.8984375,
"rewards/margins": 2.28125,
"rewards/rejected": -0.375,
"step": 690
},
{
"epoch": 1.6498896247240618,
"grad_norm": 1.7182821035385132,
"learning_rate": 9.254658385093168e-07,
"logits/chosen": -4.4375,
"logits/rejected": -4.4375,
"logps/chosen": -632.0,
"logps/rejected": -520.0,
"loss": 0.2386,
"rewards/accuracies": 0.9437500238418579,
"rewards/chosen": 1.8046875,
"rewards/margins": 2.546875,
"rewards/rejected": -0.734375,
"step": 700
},
{
"epoch": 1.6734363502575422,
"grad_norm": 1.6479544639587402,
"learning_rate": 8.633540372670808e-07,
"logits/chosen": -4.40625,
"logits/rejected": -4.5,
"logps/chosen": -576.0,
"logps/rejected": -510.0,
"loss": 0.2277,
"rewards/accuracies": 0.9375,
"rewards/chosen": 1.703125,
"rewards/margins": 2.421875,
"rewards/rejected": -0.71484375,
"step": 710
},
{
"epoch": 1.6969830757910227,
"grad_norm": 1.6700725555419922,
"learning_rate": 8.012422360248448e-07,
"logits/chosen": -4.46875,
"logits/rejected": -4.46875,
"logps/chosen": -572.0,
"logps/rejected": -464.0,
"loss": 0.2362,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": 1.765625,
"rewards/margins": 2.375,
"rewards/rejected": -0.609375,
"step": 720
},
{
"epoch": 1.7205298013245032,
"grad_norm": 1.8397201299667358,
"learning_rate": 7.391304347826088e-07,
"logits/chosen": -4.4375,
"logits/rejected": -4.5,
"logps/chosen": -656.0,
"logps/rejected": -556.0,
"loss": 0.2306,
"rewards/accuracies": 0.956250011920929,
"rewards/chosen": 1.875,
"rewards/margins": 2.65625,
"rewards/rejected": -0.77734375,
"step": 730
},
{
"epoch": 1.7440765268579839,
"grad_norm": 1.8021209239959717,
"learning_rate": 6.770186335403728e-07,
"logits/chosen": -4.46875,
"logits/rejected": -4.46875,
"logps/chosen": -600.0,
"logps/rejected": -556.0,
"loss": 0.2239,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": 1.9296875,
"rewards/margins": 2.234375,
"rewards/rejected": -0.306640625,
"step": 740
},
{
"epoch": 1.7676232523914643,
"grad_norm": 1.8263683319091797,
"learning_rate": 6.149068322981367e-07,
"logits/chosen": -4.4375,
"logits/rejected": -4.34375,
"logps/chosen": -548.0,
"logps/rejected": -516.0,
"loss": 0.2322,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": 1.6328125,
"rewards/margins": 2.171875,
"rewards/rejected": -0.54296875,
"step": 750
},
{
"epoch": 1.7911699779249448,
"grad_norm": 1.5601938962936401,
"learning_rate": 5.527950310559007e-07,
"logits/chosen": -4.46875,
"logits/rejected": -4.46875,
"logps/chosen": -572.0,
"logps/rejected": -494.0,
"loss": 0.2401,
"rewards/accuracies": 0.90625,
"rewards/chosen": 1.84375,
"rewards/margins": 2.171875,
"rewards/rejected": -0.3359375,
"step": 760
},
{
"epoch": 1.8147167034584253,
"grad_norm": 1.8713370561599731,
"learning_rate": 4.906832298136646e-07,
"logits/chosen": -4.5,
"logits/rejected": -4.4375,
"logps/chosen": -544.0,
"logps/rejected": -494.0,
"loss": 0.2345,
"rewards/accuracies": 0.9624999761581421,
"rewards/chosen": 1.71875,
"rewards/margins": 2.296875,
"rewards/rejected": -0.58203125,
"step": 770
},
{
"epoch": 1.838263428991906,
"grad_norm": 1.3841131925582886,
"learning_rate": 4.285714285714286e-07,
"logits/chosen": -4.40625,
"logits/rejected": -4.375,
"logps/chosen": -576.0,
"logps/rejected": -500.0,
"loss": 0.2201,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": 1.6640625,
"rewards/margins": 2.3125,
"rewards/rejected": -0.640625,
"step": 780
},
{
"epoch": 1.8618101545253865,
"grad_norm": 1.6164636611938477,
"learning_rate": 3.664596273291926e-07,
"logits/chosen": -4.46875,
"logits/rejected": -4.53125,
"logps/chosen": -576.0,
"logps/rejected": -520.0,
"loss": 0.2429,
"rewards/accuracies": 0.90625,
"rewards/chosen": 1.6953125,
"rewards/margins": 2.1875,
"rewards/rejected": -0.4921875,
"step": 790
},
{
"epoch": 1.885356880058867,
"grad_norm": 1.6874057054519653,
"learning_rate": 3.0434782608695656e-07,
"logits/chosen": -4.5,
"logits/rejected": -4.4375,
"logps/chosen": -572.0,
"logps/rejected": -524.0,
"loss": 0.2396,
"rewards/accuracies": 0.9375,
"rewards/chosen": 1.7265625,
"rewards/margins": 2.234375,
"rewards/rejected": -0.51171875,
"step": 800
},
{
"epoch": 1.9089036055923474,
"grad_norm": 1.7373439073562622,
"learning_rate": 2.422360248447205e-07,
"logits/chosen": -4.4375,
"logits/rejected": -4.46875,
"logps/chosen": -588.0,
"logps/rejected": -508.0,
"loss": 0.2511,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 1.9765625,
"rewards/margins": 2.34375,
"rewards/rejected": -0.37109375,
"step": 810
},
{
"epoch": 1.9324503311258279,
"grad_norm": 1.989230990409851,
"learning_rate": 1.8012422360248447e-07,
"logits/chosen": -4.375,
"logits/rejected": -4.34375,
"logps/chosen": -592.0,
"logps/rejected": -504.0,
"loss": 0.2427,
"rewards/accuracies": 0.8812500238418579,
"rewards/chosen": 1.6640625,
"rewards/margins": 2.0625,
"rewards/rejected": -0.392578125,
"step": 820
},
{
"epoch": 1.9559970566593083,
"grad_norm": 1.692975640296936,
"learning_rate": 1.1801242236024847e-07,
"logits/chosen": -4.40625,
"logits/rejected": -4.4375,
"logps/chosen": -620.0,
"logps/rejected": -512.0,
"loss": 0.2267,
"rewards/accuracies": 0.9437500238418579,
"rewards/chosen": 1.7890625,
"rewards/margins": 2.53125,
"rewards/rejected": -0.7421875,
"step": 830
},
{
"epoch": 1.9795437821927888,
"grad_norm": 1.7542818784713745,
"learning_rate": 5.590062111801243e-08,
"logits/chosen": -4.375,
"logits/rejected": -4.46875,
"logps/chosen": -604.0,
"logps/rejected": -536.0,
"loss": 0.2425,
"rewards/accuracies": 0.893750011920929,
"rewards/chosen": 1.8359375,
"rewards/margins": 2.078125,
"rewards/rejected": -0.244140625,
"step": 840
}
],
"logging_steps": 10,
"max_steps": 848,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 250,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}