{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 1602,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.018726591760299626,
"grad_norm": 6.9583668884714545,
"learning_rate": 6.17283950617284e-07,
"loss": 0.5853,
"step": 10
},
{
"epoch": 0.03745318352059925,
"grad_norm": 2.459747968301057,
"learning_rate": 1.234567901234568e-06,
"loss": 0.4956,
"step": 20
},
{
"epoch": 0.056179775280898875,
"grad_norm": 1.5518874081248877,
"learning_rate": 1.8518518518518519e-06,
"loss": 0.4511,
"step": 30
},
{
"epoch": 0.0749063670411985,
"grad_norm": 1.327079400917728,
"learning_rate": 2.469135802469136e-06,
"loss": 0.4242,
"step": 40
},
{
"epoch": 0.09363295880149813,
"grad_norm": 1.6052721933898932,
"learning_rate": 3.08641975308642e-06,
"loss": 0.4112,
"step": 50
},
{
"epoch": 0.11235955056179775,
"grad_norm": 2.1363218656503236,
"learning_rate": 3.7037037037037037e-06,
"loss": 0.4056,
"step": 60
},
{
"epoch": 0.13108614232209737,
"grad_norm": 2.5025301508678246,
"learning_rate": 4.3209876543209875e-06,
"loss": 0.3934,
"step": 70
},
{
"epoch": 0.149812734082397,
"grad_norm": 1.6968039665700831,
"learning_rate": 4.938271604938272e-06,
"loss": 0.3931,
"step": 80
},
{
"epoch": 0.16853932584269662,
"grad_norm": 1.710626024878931,
"learning_rate": 4.999611253625062e-06,
"loss": 0.3854,
"step": 90
},
{
"epoch": 0.18726591760299627,
"grad_norm": 2.229340351997921,
"learning_rate": 4.99826761145702e-06,
"loss": 0.388,
"step": 100
},
{
"epoch": 0.20599250936329588,
"grad_norm": 1.6554235103974217,
"learning_rate": 4.995964847225794e-06,
"loss": 0.3798,
"step": 110
},
{
"epoch": 0.2247191011235955,
"grad_norm": 1.5390529564927018,
"learning_rate": 4.992703943301973e-06,
"loss": 0.3788,
"step": 120
},
{
"epoch": 0.24344569288389514,
"grad_norm": 1.686640147089175,
"learning_rate": 4.988486290803271e-06,
"loss": 0.3724,
"step": 130
},
{
"epoch": 0.26217228464419473,
"grad_norm": 1.4586531417129365,
"learning_rate": 4.983313689001068e-06,
"loss": 0.3737,
"step": 140
},
{
"epoch": 0.2808988764044944,
"grad_norm": 1.383788345325366,
"learning_rate": 4.977188344552831e-06,
"loss": 0.3708,
"step": 150
},
{
"epoch": 0.299625468164794,
"grad_norm": 2.344749126006063,
"learning_rate": 4.970112870560744e-06,
"loss": 0.3705,
"step": 160
},
{
"epoch": 0.31835205992509363,
"grad_norm": 1.333313085188304,
"learning_rate": 4.962090285456943e-06,
"loss": 0.3726,
"step": 170
},
{
"epoch": 0.33707865168539325,
"grad_norm": 1.2767396164379259,
"learning_rate": 4.953124011715844e-06,
"loss": 0.368,
"step": 180
},
{
"epoch": 0.35580524344569286,
"grad_norm": 1.8939736466696433,
"learning_rate": 4.943217874394092e-06,
"loss": 0.3673,
"step": 190
},
{
"epoch": 0.37453183520599254,
"grad_norm": 1.4076191572126282,
"learning_rate": 4.932376099498779e-06,
"loss": 0.3592,
"step": 200
},
{
"epoch": 0.39325842696629215,
"grad_norm": 1.2393424297302382,
"learning_rate": 4.920603312184602e-06,
"loss": 0.3646,
"step": 210
},
{
"epoch": 0.41198501872659177,
"grad_norm": 1.6712067045257082,
"learning_rate": 4.907904534780752e-06,
"loss": 0.3567,
"step": 220
},
{
"epoch": 0.4307116104868914,
"grad_norm": 1.612702425647588,
"learning_rate": 4.894285184648369e-06,
"loss": 0.3585,
"step": 230
},
{
"epoch": 0.449438202247191,
"grad_norm": 2.4813858906223834,
"learning_rate": 4.879751071869461e-06,
"loss": 0.3623,
"step": 240
},
{
"epoch": 0.4681647940074906,
"grad_norm": 2.163116509485285,
"learning_rate": 4.864308396768294e-06,
"loss": 0.3586,
"step": 250
},
{
"epoch": 0.4868913857677903,
"grad_norm": 1.7525497737806786,
"learning_rate": 4.847963747266312e-06,
"loss": 0.3547,
"step": 260
},
{
"epoch": 0.5056179775280899,
"grad_norm": 1.3534898789991496,
"learning_rate": 4.83072409607169e-06,
"loss": 0.3534,
"step": 270
},
{
"epoch": 0.5243445692883895,
"grad_norm": 1.6829737229447668,
"learning_rate": 4.81259679770474e-06,
"loss": 0.3519,
"step": 280
},
{
"epoch": 0.5430711610486891,
"grad_norm": 1.3735075038975693,
"learning_rate": 4.7935895853604455e-06,
"loss": 0.353,
"step": 290
},
{
"epoch": 0.5617977528089888,
"grad_norm": 1.9414583619507375,
"learning_rate": 4.773710567609436e-06,
"loss": 0.3547,
"step": 300
},
{
"epoch": 0.5805243445692884,
"grad_norm": 1.4766636724732591,
"learning_rate": 4.7529682249388284e-06,
"loss": 0.3515,
"step": 310
},
{
"epoch": 0.599250936329588,
"grad_norm": 1.420170786947651,
"learning_rate": 4.7313714061344165e-06,
"loss": 0.3559,
"step": 320
},
{
"epoch": 0.6179775280898876,
"grad_norm": 1.585688048778078,
"learning_rate": 4.708929324505729e-06,
"loss": 0.3474,
"step": 330
},
{
"epoch": 0.6367041198501873,
"grad_norm": 1.4660422960583963,
"learning_rate": 4.685651553955589e-06,
"loss": 0.3478,
"step": 340
},
{
"epoch": 0.6554307116104869,
"grad_norm": 1.729952760499875,
"learning_rate": 4.661548024895843e-06,
"loss": 0.3502,
"step": 350
},
{
"epoch": 0.6741573033707865,
"grad_norm": 1.5477887192577897,
"learning_rate": 4.636629020010989e-06,
"loss": 0.3462,
"step": 360
},
{
"epoch": 0.6928838951310862,
"grad_norm": 1.3822726826009006,
"learning_rate": 4.6109051698715425e-06,
"loss": 0.3447,
"step": 370
},
{
"epoch": 0.7116104868913857,
"grad_norm": 1.4151446110625596,
"learning_rate": 4.5843874483989744e-06,
"loss": 0.3452,
"step": 380
},
{
"epoch": 0.7303370786516854,
"grad_norm": 1.213585225271953,
"learning_rate": 4.5570871681841915e-06,
"loss": 0.3525,
"step": 390
},
{
"epoch": 0.7490636704119851,
"grad_norm": 1.2153292815838297,
"learning_rate": 4.529015975661518e-06,
"loss": 0.3497,
"step": 400
},
{
"epoch": 0.7677902621722846,
"grad_norm": 1.0767956653910644,
"learning_rate": 4.5001858461402765e-06,
"loss": 0.3469,
"step": 410
},
{
"epoch": 0.7865168539325843,
"grad_norm": 1.0725016047427482,
"learning_rate": 4.470609078696062e-06,
"loss": 0.3459,
"step": 420
},
{
"epoch": 0.8052434456928839,
"grad_norm": 1.14757314560185,
"learning_rate": 4.440298290923893e-06,
"loss": 0.3446,
"step": 430
},
{
"epoch": 0.8239700374531835,
"grad_norm": 1.435028611591542,
"learning_rate": 4.409266413555481e-06,
"loss": 0.3468,
"step": 440
},
{
"epoch": 0.8426966292134831,
"grad_norm": 1.3563421475894943,
"learning_rate": 4.3775266849429245e-06,
"loss": 0.3471,
"step": 450
},
{
"epoch": 0.8614232209737828,
"grad_norm": 1.272968934220072,
"learning_rate": 4.345092645411154e-06,
"loss": 0.3459,
"step": 460
},
{
"epoch": 0.8801498127340824,
"grad_norm": 1.1988760260435716,
"learning_rate": 4.311978131481565e-06,
"loss": 0.3439,
"step": 470
},
{
"epoch": 0.898876404494382,
"grad_norm": 1.2157009389799966,
"learning_rate": 4.2781972699692955e-06,
"loss": 0.341,
"step": 480
},
{
"epoch": 0.9176029962546817,
"grad_norm": 1.2302691306481766,
"learning_rate": 4.243764471956648e-06,
"loss": 0.3436,
"step": 490
},
{
"epoch": 0.9363295880149812,
"grad_norm": 1.1395524537917356,
"learning_rate": 4.208694426645257e-06,
"loss": 0.3442,
"step": 500
},
{
"epoch": 0.9550561797752809,
"grad_norm": 1.0502686640912897,
"learning_rate": 4.1730020950895985e-06,
"loss": 0.3406,
"step": 510
},
{
"epoch": 0.9737827715355806,
"grad_norm": 1.1788715508510565,
"learning_rate": 4.136702703814536e-06,
"loss": 0.3399,
"step": 520
},
{
"epoch": 0.9925093632958801,
"grad_norm": 0.9961303841798358,
"learning_rate": 4.0998117383196035e-06,
"loss": 0.3426,
"step": 530
},
{
"epoch": 1.0,
"eval_loss": 0.3397010564804077,
"eval_runtime": 49.3629,
"eval_samples_per_second": 291.271,
"eval_steps_per_second": 1.155,
"step": 534
},
{
"epoch": 1.0112359550561798,
"grad_norm": 1.5016330309741552,
"learning_rate": 4.062344936472819e-06,
"loss": 0.3041,
"step": 540
},
{
"epoch": 1.0299625468164795,
"grad_norm": 1.4105174151178423,
"learning_rate": 4.024318281796832e-06,
"loss": 0.2653,
"step": 550
},
{
"epoch": 1.048689138576779,
"grad_norm": 1.1267842205009497,
"learning_rate": 3.985747996650271e-06,
"loss": 0.2635,
"step": 560
},
{
"epoch": 1.0674157303370786,
"grad_norm": 1.4577741522220231,
"learning_rate": 3.94665053530721e-06,
"loss": 0.2661,
"step": 570
},
{
"epoch": 1.0861423220973783,
"grad_norm": 1.1743755144218508,
"learning_rate": 3.907042576937689e-06,
"loss": 0.2614,
"step": 580
},
{
"epoch": 1.104868913857678,
"grad_norm": 1.1077587707464613,
"learning_rate": 3.8669410184923e-06,
"loss": 0.2638,
"step": 590
},
{
"epoch": 1.1235955056179776,
"grad_norm": 1.499213012424364,
"learning_rate": 3.826362967493867e-06,
"loss": 0.2649,
"step": 600
},
{
"epoch": 1.142322097378277,
"grad_norm": 1.2186830879218926,
"learning_rate": 3.7853257347392865e-06,
"loss": 0.2664,
"step": 610
},
{
"epoch": 1.1610486891385767,
"grad_norm": 1.5365523984386804,
"learning_rate": 3.74384682691466e-06,
"loss": 0.2656,
"step": 620
},
{
"epoch": 1.1797752808988764,
"grad_norm": 1.2824215690021454,
"learning_rate": 3.701943939126856e-06,
"loss": 0.264,
"step": 630
},
{
"epoch": 1.198501872659176,
"grad_norm": 1.1855864127248261,
"learning_rate": 3.659634947354686e-06,
"loss": 0.2612,
"step": 640
},
{
"epoch": 1.2172284644194757,
"grad_norm": 1.0357586593608685,
"learning_rate": 3.61693790082293e-06,
"loss": 0.2609,
"step": 650
},
{
"epoch": 1.2359550561797752,
"grad_norm": 1.2298112784157318,
"learning_rate": 3.5738710143024406e-06,
"loss": 0.2648,
"step": 660
},
{
"epoch": 1.2546816479400749,
"grad_norm": 1.1873771143182965,
"learning_rate": 3.530452660339638e-06,
"loss": 0.2648,
"step": 670
},
{
"epoch": 1.2734082397003745,
"grad_norm": 1.1104387940773652,
"learning_rate": 3.486701361418686e-06,
"loss": 0.2669,
"step": 680
},
{
"epoch": 1.2921348314606742,
"grad_norm": 1.0981466202062755,
"learning_rate": 3.4426357820597144e-06,
"loss": 0.2656,
"step": 690
},
{
"epoch": 1.3108614232209739,
"grad_norm": 1.0622356793414978,
"learning_rate": 3.3982747208564383e-06,
"loss": 0.2684,
"step": 700
},
{
"epoch": 1.3295880149812733,
"grad_norm": 1.1204181746461044,
"learning_rate": 3.353637102456585e-06,
"loss": 0.2632,
"step": 710
},
{
"epoch": 1.348314606741573,
"grad_norm": 1.1561988340566498,
"learning_rate": 3.30874196948855e-06,
"loss": 0.2655,
"step": 720
},
{
"epoch": 1.3670411985018727,
"grad_norm": 1.1634759808618433,
"learning_rate": 3.2636084744377123e-06,
"loss": 0.2662,
"step": 730
},
{
"epoch": 1.3857677902621723,
"grad_norm": 1.183622746360506,
"learning_rate": 3.218255871475894e-06,
"loss": 0.2665,
"step": 740
},
{
"epoch": 1.404494382022472,
"grad_norm": 1.3035644321187594,
"learning_rate": 3.172703508247433e-06,
"loss": 0.2641,
"step": 750
},
{
"epoch": 1.4232209737827715,
"grad_norm": 1.2520080394828148,
"learning_rate": 3.126970817615384e-06,
"loss": 0.2641,
"step": 760
},
{
"epoch": 1.4419475655430711,
"grad_norm": 1.1658528659149325,
"learning_rate": 3.0810773093713615e-06,
"loss": 0.2672,
"step": 770
},
{
"epoch": 1.4606741573033708,
"grad_norm": 1.1183168156658745,
"learning_rate": 3.0350425619125678e-06,
"loss": 0.2679,
"step": 780
},
{
"epoch": 1.4794007490636705,
"grad_norm": 1.2572209531909289,
"learning_rate": 2.988886213889551e-06,
"loss": 0.2673,
"step": 790
},
{
"epoch": 1.4981273408239701,
"grad_norm": 1.1500933028632634,
"learning_rate": 2.9426279558282617e-06,
"loss": 0.2661,
"step": 800
},
{
"epoch": 1.5168539325842696,
"grad_norm": 1.090606866883075,
"learning_rate": 2.896287521729974e-06,
"loss": 0.2616,
"step": 810
},
{
"epoch": 1.5355805243445693,
"grad_norm": 1.1320316226013576,
"learning_rate": 2.849884680652666e-06,
"loss": 0.27,
"step": 820
},
{
"epoch": 1.554307116104869,
"grad_norm": 1.103244276726941,
"learning_rate": 2.8034392282774415e-06,
"loss": 0.2635,
"step": 830
},
{
"epoch": 1.5730337078651684,
"grad_norm": 1.099593203599622,
"learning_rate": 2.7569709784635934e-06,
"loss": 0.2619,
"step": 840
},
{
"epoch": 1.5917602996254683,
"grad_norm": 1.0707708462874654,
"learning_rate": 2.710499754795916e-06,
"loss": 0.2636,
"step": 850
},
{
"epoch": 1.6104868913857677,
"grad_norm": 1.1077681383186893,
"learning_rate": 2.664045382127863e-06,
"loss": 0.2635,
"step": 860
},
{
"epoch": 1.6292134831460674,
"grad_norm": 1.0523839107618693,
"learning_rate": 2.6176276781241695e-06,
"loss": 0.2671,
"step": 870
},
{
"epoch": 1.647940074906367,
"grad_norm": 1.1453676425960706,
"learning_rate": 2.571266444806535e-06,
"loss": 0.2636,
"step": 880
},
{
"epoch": 1.6666666666666665,
"grad_norm": 1.0877845955489727,
"learning_rate": 2.5249814601059897e-06,
"loss": 0.2622,
"step": 890
},
{
"epoch": 1.6853932584269664,
"grad_norm": 1.109206643426623,
"learning_rate": 2.4787924694255235e-06,
"loss": 0.2674,
"step": 900
},
{
"epoch": 1.7041198501872659,
"grad_norm": 1.148003058058869,
"learning_rate": 2.4327191772166046e-06,
"loss": 0.2666,
"step": 910
},
{
"epoch": 1.7228464419475655,
"grad_norm": 1.0373837405536894,
"learning_rate": 2.3867812385731627e-06,
"loss": 0.2624,
"step": 920
},
{
"epoch": 1.7415730337078652,
"grad_norm": 1.0452119354423188,
"learning_rate": 2.340998250846629e-06,
"loss": 0.2672,
"step": 930
},
{
"epoch": 1.7602996254681647,
"grad_norm": 1.0839517512429684,
"learning_rate": 2.2953897452856035e-06,
"loss": 0.2627,
"step": 940
},
{
"epoch": 1.7790262172284645,
"grad_norm": 1.1357572847216206,
"learning_rate": 2.2499751787037404e-06,
"loss": 0.2658,
"step": 950
},
{
"epoch": 1.797752808988764,
"grad_norm": 1.049597976731673,
"learning_rate": 2.2047739251793624e-06,
"loss": 0.2634,
"step": 960
},
{
"epoch": 1.8164794007490637,
"grad_norm": 1.1411990916035244,
"learning_rate": 2.1598052677903904e-06,
"loss": 0.261,
"step": 970
},
{
"epoch": 1.8352059925093633,
"grad_norm": 1.1008798760396603,
"learning_rate": 2.1150883903880863e-06,
"loss": 0.2622,
"step": 980
},
{
"epoch": 1.8539325842696628,
"grad_norm": 1.0556909678728987,
"learning_rate": 2.0706423694131246e-06,
"loss": 0.2589,
"step": 990
},
{
"epoch": 1.8726591760299627,
"grad_norm": 1.01122078883521,
"learning_rate": 2.0264861657574936e-06,
"loss": 0.2602,
"step": 1000
},
{
"epoch": 1.8913857677902621,
"grad_norm": 1.030111452172985,
"learning_rate": 1.9826386166756796e-06,
"loss": 0.2625,
"step": 1010
},
{
"epoch": 1.9101123595505618,
"grad_norm": 1.0304981637914168,
"learning_rate": 1.9391184277486046e-06,
"loss": 0.2616,
"step": 1020
},
{
"epoch": 1.9288389513108615,
"grad_norm": 1.1002315606948327,
"learning_rate": 1.8959441649037334e-06,
"loss": 0.2618,
"step": 1030
},
{
"epoch": 1.947565543071161,
"grad_norm": 1.0682674034690947,
"learning_rate": 1.8531342464947551e-06,
"loss": 0.2604,
"step": 1040
},
{
"epoch": 1.9662921348314608,
"grad_norm": 1.0285099387800412,
"learning_rate": 1.810706935444223e-06,
"loss": 0.2623,
"step": 1050
},
{
"epoch": 1.9850187265917603,
"grad_norm": 1.054687396839439,
"learning_rate": 1.7686803314525056e-06,
"loss": 0.264,
"step": 1060
},
{
"epoch": 2.0,
"eval_loss": 0.3330671787261963,
"eval_runtime": 50.16,
"eval_samples_per_second": 286.643,
"eval_steps_per_second": 1.136,
"step": 1068
},
{
"epoch": 2.0037453183520597,
"grad_norm": 1.7923902161169425,
"learning_rate": 1.7270723632763563e-06,
"loss": 0.2446,
"step": 1070
},
{
"epoch": 2.0224719101123596,
"grad_norm": 1.345237927622502,
"learning_rate": 1.685900781080428e-06,
"loss": 0.1907,
"step": 1080
},
{
"epoch": 2.041198501872659,
"grad_norm": 1.1496616768510104,
"learning_rate": 1.6451831488649562e-06,
"loss": 0.1874,
"step": 1090
},
{
"epoch": 2.059925093632959,
"grad_norm": 1.1107541934632752,
"learning_rate": 1.6049368369728748e-06,
"loss": 0.187,
"step": 1100
},
{
"epoch": 2.0786516853932584,
"grad_norm": 1.1294609243658884,
"learning_rate": 1.5651790146795434e-06,
"loss": 0.1861,
"step": 1110
},
{
"epoch": 2.097378277153558,
"grad_norm": 1.1064659786749327,
"learning_rate": 1.525926642868242e-06,
"loss": 0.188,
"step": 1120
},
{
"epoch": 2.1161048689138577,
"grad_norm": 1.1288942946219733,
"learning_rate": 1.4871964667945815e-06,
"loss": 0.1889,
"step": 1130
},
{
"epoch": 2.134831460674157,
"grad_norm": 1.1095107607344008,
"learning_rate": 1.4490050089428965e-06,
"loss": 0.1878,
"step": 1140
},
{
"epoch": 2.153558052434457,
"grad_norm": 1.1688843518004304,
"learning_rate": 1.411368561977662e-06,
"loss": 0.1873,
"step": 1150
},
{
"epoch": 2.1722846441947565,
"grad_norm": 1.1746086833216787,
"learning_rate": 1.3743031817929725e-06,
"loss": 0.1885,
"step": 1160
},
{
"epoch": 2.191011235955056,
"grad_norm": 1.0953314936943879,
"learning_rate": 1.337824680663016e-06,
"loss": 0.1918,
"step": 1170
},
{
"epoch": 2.209737827715356,
"grad_norm": 1.1591899466119513,
"learning_rate": 1.3019486204964737e-06,
"loss": 0.1883,
"step": 1180
},
{
"epoch": 2.2284644194756553,
"grad_norm": 1.1558485332053892,
"learning_rate": 1.2666903061977397e-06,
"loss": 0.1901,
"step": 1190
},
{
"epoch": 2.247191011235955,
"grad_norm": 1.2117954838055114,
"learning_rate": 1.2320647791377731e-06,
"loss": 0.1895,
"step": 1200
},
{
"epoch": 2.2659176029962547,
"grad_norm": 1.0645164422876823,
"learning_rate": 1.1980868107373687e-06,
"loss": 0.1863,
"step": 1210
},
{
"epoch": 2.284644194756554,
"grad_norm": 1.1541528576196018,
"learning_rate": 1.1647708961656036e-06,
"loss": 0.1901,
"step": 1220
},
{
"epoch": 2.303370786516854,
"grad_norm": 1.116106510953403,
"learning_rate": 1.1321312481561198e-06,
"loss": 0.1892,
"step": 1230
},
{
"epoch": 2.3220973782771535,
"grad_norm": 1.0597155426906926,
"learning_rate": 1.1001817909439065e-06,
"loss": 0.1894,
"step": 1240
},
{
"epoch": 2.3408239700374533,
"grad_norm": 1.1196874887948434,
"learning_rate": 1.0689361543251532e-06,
"loss": 0.1887,
"step": 1250
},
{
"epoch": 2.359550561797753,
"grad_norm": 1.1714322905126089,
"learning_rate": 1.038407667842705e-06,
"loss": 0.1864,
"step": 1260
},
{
"epoch": 2.3782771535580522,
"grad_norm": 1.0763451314249386,
"learning_rate": 1.008609355099621e-06,
"loss": 0.1864,
"step": 1270
},
{
"epoch": 2.397003745318352,
"grad_norm": 1.1473816461596407,
"learning_rate": 9.795539282032386e-07,
"loss": 0.1875,
"step": 1280
},
{
"epoch": 2.4157303370786516,
"grad_norm": 1.098908279814066,
"learning_rate": 9.512537823421229e-07,
"loss": 0.1864,
"step": 1290
},
{
"epoch": 2.4344569288389515,
"grad_norm": 1.1578303830285432,
"learning_rate": 9.237209904982213e-07,
"loss": 0.1904,
"step": 1300
},
{
"epoch": 2.453183520599251,
"grad_norm": 1.1404639700697865,
"learning_rate": 8.969672982964711e-07,
"loss": 0.1907,
"step": 1310
},
{
"epoch": 2.4719101123595504,
"grad_norm": 1.0825550585492165,
"learning_rate": 8.710041189940513e-07,
"loss": 0.1868,
"step": 1320
},
{
"epoch": 2.4906367041198503,
"grad_norm": 1.1503042083334585,
"learning_rate": 8.458425286114396e-07,
"loss": 0.189,
"step": 1330
},
{
"epoch": 2.5093632958801497,
"grad_norm": 1.1289267850787381,
"learning_rate": 8.214932612073207e-07,
"loss": 0.1891,
"step": 1340
},
{
"epoch": 2.5280898876404496,
"grad_norm": 1.0922707099906583,
"learning_rate": 7.979667042993847e-07,
"loss": 0.1882,
"step": 1350
},
{
"epoch": 2.546816479400749,
"grad_norm": 1.159472662881807,
"learning_rate": 7.752728944329658e-07,
"loss": 0.1884,
"step": 1360
},
{
"epoch": 2.5655430711610485,
"grad_norm": 1.096656880156706,
"learning_rate": 7.534215128993953e-07,
"loss": 0.1912,
"step": 1370
},
{
"epoch": 2.5842696629213484,
"grad_norm": 1.177067424925165,
"learning_rate": 7.324218816059202e-07,
"loss": 0.1869,
"step": 1380
},
{
"epoch": 2.602996254681648,
"grad_norm": 1.1433902131098264,
"learning_rate": 7.122829590989305e-07,
"loss": 0.1859,
"step": 1390
},
{
"epoch": 2.6217228464419478,
"grad_norm": 1.0780282296557087,
"learning_rate": 6.930133367421965e-07,
"loss": 0.1903,
"step": 1400
},
{
"epoch": 2.640449438202247,
"grad_norm": 1.1302075316778486,
"learning_rate": 6.746212350517535e-07,
"loss": 0.187,
"step": 1410
},
{
"epoch": 2.6591760299625467,
"grad_norm": 1.1073095304447773,
"learning_rate": 6.57114500188989e-07,
"loss": 0.1886,
"step": 1420
},
{
"epoch": 2.6779026217228465,
"grad_norm": 1.0420564202777807,
"learning_rate": 6.405006006134285e-07,
"loss": 0.1879,
"step": 1430
},
{
"epoch": 2.696629213483146,
"grad_norm": 1.0777824438327903,
"learning_rate": 6.247866238966579e-07,
"loss": 0.1904,
"step": 1440
},
{
"epoch": 2.715355805243446,
"grad_norm": 1.1121227385024828,
"learning_rate": 6.099792736987272e-07,
"loss": 0.1873,
"step": 1450
},
{
"epoch": 2.7340823970037453,
"grad_norm": 1.109347730952044,
"learning_rate": 5.96084866908337e-07,
"loss": 0.1897,
"step": 1460
},
{
"epoch": 2.752808988764045,
"grad_norm": 1.131219956309559,
"learning_rate": 5.831093309480244e-07,
"loss": 0.1848,
"step": 1470
},
{
"epoch": 2.7715355805243447,
"grad_norm": 1.0563106661530495,
"learning_rate": 5.710582012454928e-07,
"loss": 0.1862,
"step": 1480
},
{
"epoch": 2.790262172284644,
"grad_norm": 1.1800848608197825,
"learning_rate": 5.599366188721713e-07,
"loss": 0.1878,
"step": 1490
},
{
"epoch": 2.808988764044944,
"grad_norm": 1.1143488700250004,
"learning_rate": 5.497493283500105e-07,
"loss": 0.1875,
"step": 1500
},
{
"epoch": 2.8277153558052435,
"grad_norm": 1.0433720861256908,
"learning_rate": 5.405006756274435e-07,
"loss": 0.1872,
"step": 1510
},
{
"epoch": 2.846441947565543,
"grad_norm": 1.1343560438226763,
"learning_rate": 5.321946062253841e-07,
"loss": 0.1894,
"step": 1520
},
{
"epoch": 2.865168539325843,
"grad_norm": 1.1296113946372626,
"learning_rate": 5.248346635540485e-07,
"loss": 0.1864,
"step": 1530
},
{
"epoch": 2.8838951310861423,
"grad_norm": 1.1286259876244906,
"learning_rate": 5.184239874013167e-07,
"loss": 0.1893,
"step": 1540
},
{
"epoch": 2.902621722846442,
"grad_norm": 1.079795528012858,
"learning_rate": 5.129653125932872e-07,
"loss": 0.1867,
"step": 1550
},
{
"epoch": 2.9213483146067416,
"grad_norm": 1.1367094539661884,
"learning_rate": 5.084609678275833e-07,
"loss": 0.1913,
"step": 1560
},
{
"epoch": 2.940074906367041,
"grad_norm": 1.1317799697421957,
"learning_rate": 5.049128746799206e-07,
"loss": 0.1874,
"step": 1570
},
{
"epoch": 2.958801498127341,
"grad_norm": 1.0575396751944557,
"learning_rate": 5.023225467843537e-07,
"loss": 0.1868,
"step": 1580
},
{
"epoch": 2.9775280898876404,
"grad_norm": 1.0774002831939866,
"learning_rate": 5.006910891875522e-07,
"loss": 0.1875,
"step": 1590
},
{
"epoch": 2.9962546816479403,
"grad_norm": 1.0987879465354378,
"learning_rate": 5.00019197877381e-07,
"loss": 0.1874,
"step": 1600
},
{
"epoch": 3.0,
"eval_loss": 0.3581169545650482,
"eval_runtime": 55.9282,
"eval_samples_per_second": 257.08,
"eval_steps_per_second": 1.019,
"step": 1602
},
{
"epoch": 3.0,
"step": 1602,
"total_flos": 2683409667194880.0,
"train_loss": 0.2738360005445992,
"train_runtime": 9858.4557,
"train_samples_per_second": 83.126,
"train_steps_per_second": 0.163
}
],
"logging_steps": 10,
"max_steps": 1602,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2683409667194880.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}