{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.16327085955304602,
  "eval_steps": 50,
  "global_step": 150,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.005442361985101534,
      "grad_norm": 223.0,
      "learning_rate": 1.4999011862064836e-06,
      "logits/chosen": -0.3115585446357727,
      "logits/rejected": -0.4312504231929779,
      "logps/chosen": -0.8023494482040405,
      "logps/rejected": -0.9114950299263,
      "loss": 1.7206,
      "nll_loss": 0.8566828966140747,
      "rewards/accuracies": 0.671875,
      "rewards/chosen": -2.005873680114746,
      "rewards/margins": 0.27286386489868164,
      "rewards/rejected": -2.2787375450134277,
      "step": 5
    },
    {
      "epoch": 0.010884723970203068,
      "grad_norm": 213.0,
      "learning_rate": 1.4996047737567963e-06,
      "logits/chosen": -0.2838870584964752,
      "logits/rejected": -0.4562837481498718,
      "logps/chosen": -0.756219744682312,
      "logps/rejected": -0.893709659576416,
      "loss": 1.6545,
      "nll_loss": 0.8323467373847961,
      "rewards/accuracies": 0.746874988079071,
      "rewards/chosen": -1.8905493021011353,
      "rewards/margins": 0.34372463822364807,
      "rewards/rejected": -2.23427414894104,
      "step": 10
    },
    {
      "epoch": 0.0163270859553046,
      "grad_norm": 207.0,
      "learning_rate": 1.4991108494350523e-06,
      "logits/chosen": -0.2802577018737793,
      "logits/rejected": -0.445369154214859,
      "logps/chosen": -0.7702305912971497,
      "logps/rejected": -0.9028533101081848,
      "loss": 1.67,
      "nll_loss": 0.8351463079452515,
      "rewards/accuracies": 0.721875011920929,
      "rewards/chosen": -1.9255762100219727,
      "rewards/margins": 0.331557035446167,
      "rewards/rejected": -2.2571334838867188,
      "step": 15
    },
    {
      "epoch": 0.021769447940406136,
      "grad_norm": 193.0,
      "learning_rate": 1.4984195578532098e-06,
      "logits/chosen": -0.27219459414482117,
      "logits/rejected": -0.43887200951576233,
      "logps/chosen": -0.7428117990493774,
      "logps/rejected": -0.8896303176879883,
      "loss": 1.6298,
      "nll_loss": 0.8110933303833008,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -1.8570295572280884,
      "rewards/margins": 0.36704641580581665,
      "rewards/rejected": -2.2240757942199707,
      "step": 20
    },
    {
      "epoch": 0.02721180992550767,
      "grad_norm": 196.0,
      "learning_rate": 1.4975311014087314e-06,
      "logits/chosen": -0.28368493914604187,
      "logits/rejected": -0.45796823501586914,
      "logps/chosen": -0.7394925951957703,
      "logps/rejected": -0.8978835344314575,
      "loss": 1.6038,
      "nll_loss": 0.810085117816925,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": -1.8487316370010376,
      "rewards/margins": 0.3959771394729614,
      "rewards/rejected": -2.244708776473999,
      "step": 25
    },
    {
      "epoch": 0.0326541719106092,
      "grad_norm": 180.0,
      "learning_rate": 1.4964457402253263e-06,
      "logits/chosen": -0.26782557368278503,
      "logits/rejected": -0.4345061779022217,
      "logps/chosen": -0.7221552133560181,
      "logps/rejected": -0.8976587057113647,
      "loss": 1.5676,
      "nll_loss": 0.8051786422729492,
      "rewards/accuracies": 0.801562488079071,
      "rewards/chosen": -1.80538809299469,
      "rewards/margins": 0.4387587606906891,
      "rewards/rejected": -2.2441468238830566,
      "step": 30
    },
    {
      "epoch": 0.03809653389571074,
      "grad_norm": 184.0,
      "learning_rate": 1.495163792076789e-06,
      "logits/chosen": -0.2734525799751282,
      "logits/rejected": -0.4188796579837799,
      "logps/chosen": -0.7307429313659668,
      "logps/rejected": -0.9264034032821655,
      "loss": 1.5643,
      "nll_loss": 0.8087199926376343,
      "rewards/accuracies": 0.809374988079071,
      "rewards/chosen": -1.826857328414917,
      "rewards/margins": 0.4891512393951416,
      "rewards/rejected": -2.3160085678100586,
      "step": 35
    },
    {
      "epoch": 0.04353889588081227,
      "grad_norm": 167.0,
      "learning_rate": 1.493685632293963e-06,
      "logits/chosen": -0.2776848375797272,
      "logits/rejected": -0.45066213607788086,
      "logps/chosen": -0.6951563954353333,
      "logps/rejected": -0.9053373336791992,
      "loss": 1.5161,
      "nll_loss": 0.7817299962043762,
      "rewards/accuracies": 0.8296874761581421,
      "rewards/chosen": -1.7378908395767212,
      "rewards/margins": 0.525452733039856,
      "rewards/rejected": -2.263343572616577,
      "step": 40
    },
    {
      "epoch": 0.048981257865913806,
      "grad_norm": 166.0,
      "learning_rate": 1.49201169365485e-06,
      "logits/chosen": -0.2875303626060486,
      "logits/rejected": -0.4160293936729431,
      "logps/chosen": -0.7146323919296265,
      "logps/rejected": -0.9353097677230835,
      "loss": 1.509,
      "nll_loss": 0.789823055267334,
      "rewards/accuracies": 0.854687511920929,
      "rewards/chosen": -1.786581039428711,
      "rewards/margins": 0.551693320274353,
      "rewards/rejected": -2.3382744789123535,
      "step": 45
    },
    {
      "epoch": 0.05442361985101534,
      "grad_norm": 167.0,
      "learning_rate": 1.490142466257901e-06,
      "logits/chosen": -0.2606073021888733,
      "logits/rejected": -0.42186981439590454,
      "logps/chosen": -0.6743353605270386,
      "logps/rejected": -0.9106225967407227,
      "loss": 1.472,
      "nll_loss": 0.7610515356063843,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -1.6858384609222412,
      "rewards/margins": 0.590718150138855,
      "rewards/rejected": -2.2765564918518066,
      "step": 50
    },
    {
      "epoch": 0.05442361985101534,
      "eval_logits/chosen": -0.4242976903915405,
      "eval_logits/rejected": -0.5250905156135559,
      "eval_logps/chosen": -0.6774176955223083,
      "eval_logps/rejected": -0.9184495806694031,
      "eval_loss": 1.466933012008667,
      "eval_nll_loss": 0.7765384316444397,
      "eval_rewards/accuracies": 0.8569999933242798,
      "eval_rewards/chosen": -1.6935441493988037,
      "eval_rewards/margins": 0.6025797128677368,
      "eval_rewards/rejected": -2.29612398147583,
      "eval_runtime": 119.2017,
      "eval_samples_per_second": 8.389,
      "eval_steps_per_second": 2.097,
      "step": 50
    },
    {
      "epoch": 0.059865981836116876,
      "grad_norm": 176.0,
      "learning_rate": 1.4880784973785227e-06,
      "logits/chosen": -0.2771835923194885,
      "logits/rejected": -0.43040376901626587,
      "logps/chosen": -0.6886002421379089,
      "logps/rejected": -0.9180153608322144,
      "loss": 1.4611,
      "nll_loss": 0.7568584084510803,
      "rewards/accuracies": 0.8687499761581421,
      "rewards/chosen": -1.7215007543563843,
      "rewards/margins": 0.5735376477241516,
      "rewards/rejected": -2.2950384616851807,
      "step": 55
    },
    {
      "epoch": 0.0653083438212184,
      "grad_norm": 173.0,
      "learning_rate": 1.4858203913088481e-06,
      "logits/chosen": -0.28170254826545715,
      "logits/rejected": -0.43440356850624084,
      "logps/chosen": -0.6944113969802856,
      "logps/rejected": -0.958784282207489,
      "loss": 1.456,
      "nll_loss": 0.7766550183296204,
      "rewards/accuracies": 0.8531249761581421,
      "rewards/chosen": -1.7360286712646484,
      "rewards/margins": 0.6609319448471069,
      "rewards/rejected": -2.396960496902466,
      "step": 60
    },
    {
      "epoch": 0.07075070580631994,
      "grad_norm": 175.0,
      "learning_rate": 1.4833688091808084e-06,
      "logits/chosen": -0.29959458112716675,
      "logits/rejected": -0.4557119905948639,
      "logps/chosen": -0.6825212836265564,
      "logps/rejected": -0.9580196142196655,
      "loss": 1.4192,
      "nll_loss": 0.7619790434837341,
      "rewards/accuracies": 0.895312488079071,
      "rewards/chosen": -1.7063030004501343,
      "rewards/margins": 0.6887460947036743,
      "rewards/rejected": -2.3950493335723877,
      "step": 65
    },
    {
      "epoch": 0.07619306779142147,
      "grad_norm": 189.0,
      "learning_rate": 1.4807244687725672e-06,
      "logits/chosen": -0.2963514029979706,
      "logits/rejected": -0.4536547064781189,
      "logps/chosen": -0.6995417475700378,
      "logps/rejected": -0.9971321821212769,
      "loss": 1.4071,
      "nll_loss": 0.7711254358291626,
      "rewards/accuracies": 0.903124988079071,
      "rewards/chosen": -1.748854398727417,
      "rewards/margins": 0.7439761757850647,
      "rewards/rejected": -2.492830753326416,
      "step": 70
    },
    {
      "epoch": 0.08163542977652301,
      "grad_norm": 192.0,
      "learning_rate": 1.477888144298368e-06,
      "logits/chosen": -0.3087882399559021,
      "logits/rejected": -0.4929865002632141,
      "logps/chosen": -0.6821542978286743,
      "logps/rejected": -1.0003395080566406,
      "loss": 1.3817,
      "nll_loss": 0.7631580233573914,
      "rewards/accuracies": 0.8984375,
      "rewards/chosen": -1.7053858041763306,
      "rewards/margins": 0.7954627871513367,
      "rewards/rejected": -2.5008487701416016,
      "step": 75
    },
    {
      "epoch": 0.08707779176162454,
      "grad_norm": 250.0,
      "learning_rate": 1.4748606661818576e-06,
      "logits/chosen": -0.30626195669174194,
      "logits/rejected": -0.5067037343978882,
      "logps/chosen": -0.6984843015670776,
      "logps/rejected": -1.0486465692520142,
      "loss": 1.3604,
      "nll_loss": 0.777851939201355,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -1.7462108135223389,
      "rewards/margins": 0.8754053115844727,
      "rewards/rejected": -2.6216163635253906,
      "step": 80
    },
    {
      "epoch": 0.09252015374672608,
      "grad_norm": 229.0,
      "learning_rate": 1.4716429208129552e-06,
      "logits/chosen": -0.3446356952190399,
      "logits/rejected": -0.558761477470398,
      "logps/chosen": -0.6777626276016235,
      "logps/rejected": -1.0675888061523438,
      "loss": 1.3122,
      "nll_loss": 0.7588330507278442,
      "rewards/accuracies": 0.9359375238418579,
      "rewards/chosen": -1.694406509399414,
      "rewards/margins": 0.9745653867721558,
      "rewards/rejected": -2.6689720153808594,
      "step": 85
    },
    {
      "epoch": 0.09796251573182761,
      "grad_norm": 229.0,
      "learning_rate": 1.4682358502883309e-06,
      "logits/chosen": -0.36884021759033203,
      "logits/rejected": -0.6074358820915222,
      "logps/chosen": -0.6712280511856079,
      "logps/rejected": -1.1392602920532227,
      "loss": 1.2571,
      "nll_loss": 0.7604485154151917,
      "rewards/accuracies": 0.940625011920929,
      "rewards/chosen": -1.678070068359375,
      "rewards/margins": 1.1700807809829712,
      "rewards/rejected": -2.8481509685516357,
      "step": 90
    },
    {
      "epoch": 0.10340487771692915,
      "grad_norm": 248.0,
      "learning_rate": 1.4646404521355798e-06,
      "logits/chosen": -0.3828192353248596,
      "logits/rejected": -0.6576655507087708,
      "logps/chosen": -0.695937991142273,
      "logps/rejected": -1.2227665185928345,
      "loss": 1.2367,
      "nll_loss": 0.7761465311050415,
      "rewards/accuracies": 0.9468749761581421,
      "rewards/chosen": -1.7398450374603271,
      "rewards/margins": 1.3170711994171143,
      "rewards/rejected": -3.0569162368774414,
      "step": 95
    },
    {
      "epoch": 0.10884723970203068,
      "grad_norm": 233.0,
      "learning_rate": 1.4608577790211639e-06,
      "logits/chosen": -0.408522367477417,
      "logits/rejected": -0.6887016296386719,
      "logps/chosen": -0.6912533640861511,
      "logps/rejected": -1.2720332145690918,
      "loss": 1.1964,
      "nll_loss": 0.7770565748214722,
      "rewards/accuracies": 0.956250011920929,
      "rewards/chosen": -1.7281334400177002,
      "rewards/margins": 1.4519497156143188,
      "rewards/rejected": -3.1800830364227295,
      "step": 100
    },
    {
      "epoch": 0.10884723970203068,
      "eval_logits/chosen": -0.6553338766098022,
      "eval_logits/rejected": -0.960581362247467,
      "eval_logps/chosen": -0.6551228165626526,
      "eval_logps/rejected": -1.286936640739441,
      "eval_loss": 1.1423654556274414,
      "eval_nll_loss": 0.7609782814979553,
      "eval_rewards/accuracies": 0.9639999866485596,
      "eval_rewards/chosen": -1.637807011604309,
      "eval_rewards/margins": 1.579534649848938,
      "eval_rewards/rejected": -3.217341899871826,
      "eval_runtime": 122.2807,
      "eval_samples_per_second": 8.178,
      "eval_steps_per_second": 2.044,
      "step": 100
    },
    {
      "epoch": 0.11428960168713222,
      "grad_norm": 244.0,
      "learning_rate": 1.4568889384422085e-06,
      "logits/chosen": -0.44453367590904236,
      "logits/rejected": -0.8137799501419067,
      "logps/chosen": -0.6734666228294373,
      "logps/rejected": -1.3720922470092773,
      "loss": 1.1097,
      "nll_loss": 0.7629668116569519,
      "rewards/accuracies": 0.9703124761581421,
      "rewards/chosen": -1.6836665868759155,
      "rewards/margins": 1.7465636730194092,
      "rewards/rejected": -3.4302303791046143,
      "step": 105
    },
    {
      "epoch": 0.11973196367223375,
      "grad_norm": 232.0,
      "learning_rate": 1.4527350924022508e-06,
      "logits/chosen": -0.4658277928829193,
      "logits/rejected": -0.8643436431884766,
      "logps/chosen": -0.6887673735618591,
      "logps/rejected": -1.4408514499664307,
      "loss": 1.0866,
      "nll_loss": 0.7645546793937683,
      "rewards/accuracies": 0.9765625,
      "rewards/chosen": -1.7219183444976807,
      "rewards/margins": 1.8802101612091064,
      "rewards/rejected": -3.602128505706787,
      "step": 110
    },
    {
      "epoch": 0.12517432565733527,
      "grad_norm": 227.0,
      "learning_rate": 1.4483974570710226e-06,
      "logits/chosen": -0.4871746897697449,
      "logits/rejected": -1.0338547229766846,
      "logps/chosen": -0.6287084817886353,
      "logps/rejected": -1.4971318244934082,
      "loss": 0.9829,
      "nll_loss": 0.7324297428131104,
      "rewards/accuracies": 0.984375,
      "rewards/chosen": -1.571771264076233,
      "rewards/margins": 2.171058177947998,
      "rewards/rejected": -3.7428295612335205,
      "step": 115
    },
    {
      "epoch": 0.1306166876424368,
      "grad_norm": 186.0,
      "learning_rate": 1.4438773024283826e-06,
      "logits/chosen": -0.5202233195304871,
      "logits/rejected": -1.1327874660491943,
      "logps/chosen": -0.6288429498672485,
      "logps/rejected": -1.5928165912628174,
      "loss": 0.959,
      "nll_loss": 0.7312449216842651,
      "rewards/accuracies": 0.9859374761581421,
      "rewards/chosen": -1.572107195854187,
      "rewards/margins": 2.4099345207214355,
      "rewards/rejected": -3.982041120529175,
      "step": 120
    },
    {
      "epoch": 0.13605904962753834,
      "grad_norm": 177.0,
      "learning_rate": 1.4391759518924858e-06,
      "logits/chosen": -0.5006698369979858,
      "logits/rejected": -1.2095844745635986,
      "logps/chosen": -0.6492566466331482,
      "logps/rejected": -1.7050262689590454,
      "loss": 0.9298,
      "nll_loss": 0.742263674736023,
      "rewards/accuracies": 0.9906250238418579,
      "rewards/chosen": -1.623141884803772,
      "rewards/margins": 2.6394238471984863,
      "rewards/rejected": -4.262566089630127,
      "step": 125
    },
    {
      "epoch": 0.14150141161263988,
      "grad_norm": 155.0,
      "learning_rate": 1.4342947819323133e-06,
      "logits/chosen": -0.49277132749557495,
      "logits/rejected": -1.2336362600326538,
      "logps/chosen": -0.6462730169296265,
      "logps/rejected": -1.7665297985076904,
      "loss": 0.9073,
      "nll_loss": 0.7340320348739624,
      "rewards/accuracies": 0.9906250238418579,
      "rewards/chosen": -1.615682601928711,
      "rewards/margins": 2.8006420135498047,
      "rewards/rejected": -4.416324615478516,
      "step": 130
    },
    {
      "epoch": 0.1469437735977414,
      "grad_norm": 157.0,
      "learning_rate": 1.4292352216646672e-06,
      "logits/chosen": -0.4820989966392517,
      "logits/rejected": -1.2759294509887695,
      "logps/chosen": -0.6405208706855774,
      "logps/rejected": -1.8488733768463135,
      "loss": 0.8986,
      "nll_loss": 0.7417997121810913,
      "rewards/accuracies": 0.989062488079071,
      "rewards/chosen": -1.601301908493042,
      "rewards/margins": 3.020881175994873,
      "rewards/rejected": -4.622182846069336,
      "step": 135
    },
    {
      "epoch": 0.15238613558284295,
      "grad_norm": 117.0,
      "learning_rate": 1.4239987524357501e-06,
      "logits/chosen": -0.48703551292419434,
      "logits/rejected": -1.3474723100662231,
      "logps/chosen": -0.6453564167022705,
      "logps/rejected": -1.947719931602478,
      "loss": 0.8587,
      "nll_loss": 0.7325531840324402,
      "rewards/accuracies": 0.9937499761581421,
      "rewards/chosen": -1.6133911609649658,
      "rewards/margins": 3.2559094429016113,
      "rewards/rejected": -4.86929988861084,
      "step": 140
    },
    {
      "epoch": 0.15782849756794448,
      "grad_norm": 114.0,
      "learning_rate": 1.4185869073874565e-06,
      "logits/chosen": -0.4773550033569336,
      "logits/rejected": -1.3999682664871216,
      "logps/chosen": -0.6471393704414368,
      "logps/rejected": -2.04455304145813,
      "loss": 0.8458,
      "nll_loss": 0.7375032305717468,
      "rewards/accuracies": 0.996874988079071,
      "rewards/chosen": -1.6178483963012695,
      "rewards/margins": 3.493534803390503,
      "rewards/rejected": -5.111382961273193,
      "step": 145
    },
    {
      "epoch": 0.16327085955304602,
      "grad_norm": 102.5,
      "learning_rate": 1.413001271008494e-06,
      "logits/chosen": -0.5018254518508911,
      "logits/rejected": -1.4777194261550903,
      "logps/chosen": -0.6435462832450867,
      "logps/rejected": -2.1130857467651367,
      "loss": 0.8377,
      "nll_loss": 0.7388636469841003,
      "rewards/accuracies": 0.9937499761581421,
      "rewards/chosen": -1.608865737915039,
      "rewards/margins": 3.673849105834961,
      "rewards/rejected": -5.28271484375,
      "step": 150
    },
    {
      "epoch": 0.16327085955304602,
      "eval_logits/chosen": -0.6831561326980591,
      "eval_logits/rejected": -1.8252586126327515,
      "eval_logps/chosen": -0.6115793585777283,
      "eval_logps/rejected": -2.0589704513549805,
      "eval_loss": 0.8232702612876892,
      "eval_nll_loss": 0.7224369645118713,
      "eval_rewards/accuracies": 0.9929999709129333,
      "eval_rewards/chosen": -1.528948187828064,
      "eval_rewards/margins": 3.6184778213500977,
      "eval_rewards/rejected": -5.147425651550293,
      "eval_runtime": 112.2438,
      "eval_samples_per_second": 8.909,
      "eval_steps_per_second": 2.227,
      "step": 150
    }
  ],
  "logging_steps": 5,
  "max_steps": 918,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}