{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9994372537985369,
"eval_steps": 500,
"global_step": 888,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0011254924029262803,
"grad_norm": 0.8479363918304443,
"learning_rate": 2.247191011235955e-06,
"loss": 1.0084,
"step": 1
},
{
"epoch": 0.005627462014631401,
"grad_norm": 1.0625563859939575,
"learning_rate": 1.1235955056179776e-05,
"loss": 1.1513,
"step": 5
},
{
"epoch": 0.011254924029262802,
"grad_norm": 1.9530349969863892,
"learning_rate": 2.2471910112359552e-05,
"loss": 1.3647,
"step": 10
},
{
"epoch": 0.016882386043894203,
"grad_norm": 1.0856108665466309,
"learning_rate": 3.370786516853933e-05,
"loss": 1.1026,
"step": 15
},
{
"epoch": 0.022509848058525603,
"grad_norm": 1.3664439916610718,
"learning_rate": 4.4943820224719104e-05,
"loss": 0.836,
"step": 20
},
{
"epoch": 0.028137310073157007,
"grad_norm": 0.8295888304710388,
"learning_rate": 5.6179775280898885e-05,
"loss": 0.6505,
"step": 25
},
{
"epoch": 0.03376477208778841,
"grad_norm": 0.8303141593933105,
"learning_rate": 6.741573033707866e-05,
"loss": 0.6232,
"step": 30
},
{
"epoch": 0.03939223410241981,
"grad_norm": 0.6008478999137878,
"learning_rate": 7.865168539325843e-05,
"loss": 0.428,
"step": 35
},
{
"epoch": 0.04501969611705121,
"grad_norm": 0.731120228767395,
"learning_rate": 8.988764044943821e-05,
"loss": 0.4453,
"step": 40
},
{
"epoch": 0.050647158131682614,
"grad_norm": 0.503491997718811,
"learning_rate": 0.00010112359550561799,
"loss": 0.3312,
"step": 45
},
{
"epoch": 0.056274620146314014,
"grad_norm": 0.736613392829895,
"learning_rate": 0.00011235955056179777,
"loss": 0.3825,
"step": 50
},
{
"epoch": 0.061902082160945414,
"grad_norm": 1.1015475988388062,
"learning_rate": 0.00012359550561797752,
"loss": 0.2907,
"step": 55
},
{
"epoch": 0.06752954417557681,
"grad_norm": 0.7216058373451233,
"learning_rate": 0.00013483146067415732,
"loss": 0.3717,
"step": 60
},
{
"epoch": 0.07315700619020822,
"grad_norm": 1.0304406881332397,
"learning_rate": 0.0001460674157303371,
"loss": 0.4053,
"step": 65
},
{
"epoch": 0.07878446820483961,
"grad_norm": 0.717244029045105,
"learning_rate": 0.00015730337078651685,
"loss": 0.3471,
"step": 70
},
{
"epoch": 0.08441193021947102,
"grad_norm": 0.8437204360961914,
"learning_rate": 0.00016853932584269662,
"loss": 0.3412,
"step": 75
},
{
"epoch": 0.09003939223410241,
"grad_norm": 0.5658055543899536,
"learning_rate": 0.00017977528089887642,
"loss": 0.4183,
"step": 80
},
{
"epoch": 0.09566685424873382,
"grad_norm": 0.6256535053253174,
"learning_rate": 0.00019101123595505618,
"loss": 0.3304,
"step": 85
},
{
"epoch": 0.10129431626336523,
"grad_norm": 0.41338804364204407,
"learning_rate": 0.00019999922700687455,
"loss": 0.324,
"step": 90
},
{
"epoch": 0.10692177827799662,
"grad_norm": 1.2900114059448242,
"learning_rate": 0.000199972173502251,
"loss": 0.3403,
"step": 95
},
{
"epoch": 0.11254924029262803,
"grad_norm": 0.5315810441970825,
"learning_rate": 0.00019990648229089103,
"loss": 0.3061,
"step": 100
},
{
"epoch": 0.11817670230725942,
"grad_norm": 0.6736317276954651,
"learning_rate": 0.00019980217876143698,
"loss": 0.307,
"step": 105
},
{
"epoch": 0.12380416432189083,
"grad_norm": 0.5415677428245544,
"learning_rate": 0.000199659303225598,
"loss": 0.3489,
"step": 110
},
{
"epoch": 0.12943162633652222,
"grad_norm": 0.6013039946556091,
"learning_rate": 0.0001994779109025702,
"loss": 0.3426,
"step": 115
},
{
"epoch": 0.13505908835115363,
"grad_norm": 0.6924653053283691,
"learning_rate": 0.00019925807189769533,
"loss": 0.3847,
"step": 120
},
{
"epoch": 0.14068655036578503,
"grad_norm": 2.3059215545654297,
"learning_rate": 0.00019899987117536587,
"loss": 0.3521,
"step": 125
},
{
"epoch": 0.14631401238041644,
"grad_norm": 0.7815282940864563,
"learning_rate": 0.00019870340852618803,
"loss": 0.2937,
"step": 130
},
{
"epoch": 0.15194147439504782,
"grad_norm": 0.7751880288124084,
"learning_rate": 0.00019836879852841387,
"loss": 0.3299,
"step": 135
},
{
"epoch": 0.15756893640967923,
"grad_norm": 0.6737527251243591,
"learning_rate": 0.0001979961705036587,
"loss": 0.307,
"step": 140
},
{
"epoch": 0.16319639842431063,
"grad_norm": 0.4875199794769287,
"learning_rate": 0.00019758566846692029,
"loss": 0.3564,
"step": 145
},
{
"epoch": 0.16882386043894204,
"grad_norm": 0.48927029967308044,
"learning_rate": 0.00019713745107091923,
"loss": 0.3709,
"step": 150
},
{
"epoch": 0.17445132245357345,
"grad_norm": 0.8061405420303345,
"learning_rate": 0.00019665169154478213,
"loss": 0.2479,
"step": 155
},
{
"epoch": 0.18007878446820483,
"grad_norm": 0.5379148721694946,
"learning_rate": 0.00019612857762709124,
"loss": 0.2649,
"step": 160
},
{
"epoch": 0.18570624648283623,
"grad_norm": 0.659520149230957,
"learning_rate": 0.0001955683114933263,
"loss": 0.3598,
"step": 165
},
{
"epoch": 0.19133370849746764,
"grad_norm": 0.6669427752494812,
"learning_rate": 0.00019497110967772692,
"loss": 0.2604,
"step": 170
},
{
"epoch": 0.19696117051209905,
"grad_norm": 0.48957839608192444,
"learning_rate": 0.00019433720298960537,
"loss": 0.2664,
"step": 175
},
{
"epoch": 0.20258863252673046,
"grad_norm": 0.4717267155647278,
"learning_rate": 0.0001936668364241424,
"loss": 0.2949,
"step": 180
},
{
"epoch": 0.20821609454136183,
"grad_norm": 0.5135904550552368,
"learning_rate": 0.00019296026906770027,
"loss": 0.3402,
"step": 185
},
{
"epoch": 0.21384355655599324,
"grad_norm": 0.36506161093711853,
"learning_rate": 0.00019221777399768998,
"loss": 0.2935,
"step": 190
},
{
"epoch": 0.21947101857062465,
"grad_norm": 0.6421459317207336,
"learning_rate": 0.00019143963817703087,
"loss": 0.2803,
"step": 195
},
{
"epoch": 0.22509848058525606,
"grad_norm": 0.6440847516059875,
"learning_rate": 0.0001906261623432441,
"loss": 0.2791,
"step": 200
},
{
"epoch": 0.23072594259988746,
"grad_norm": 0.4226614832878113,
"learning_rate": 0.00018977766089222208,
"loss": 0.2942,
"step": 205
},
{
"epoch": 0.23635340461451884,
"grad_norm": 0.3801310360431671,
"learning_rate": 0.00018889446175671926,
"loss": 0.3874,
"step": 210
},
{
"epoch": 0.24198086662915025,
"grad_norm": 0.626059353351593,
"learning_rate": 0.00018797690627961132,
"loss": 0.4083,
"step": 215
},
{
"epoch": 0.24760832864378166,
"grad_norm": 0.35303783416748047,
"learning_rate": 0.0001870253490819713,
"loss": 0.2474,
"step": 220
},
{
"epoch": 0.25323579065841306,
"grad_norm": 0.5032233595848083,
"learning_rate": 0.00018604015792601396,
"loss": 0.236,
"step": 225
},
{
"epoch": 0.25886325267304444,
"grad_norm": 0.7859108448028564,
"learning_rate": 0.00018502171357296144,
"loss": 0.2065,
"step": 230
},
{
"epoch": 0.2644907146876759,
"grad_norm": 0.47997885942459106,
"learning_rate": 0.00018397040963588488,
"loss": 0.2757,
"step": 235
},
{
"epoch": 0.27011817670230726,
"grad_norm": 0.3313188850879669,
"learning_rate": 0.00018288665242757903,
"loss": 0.2234,
"step": 240
},
{
"epoch": 0.27574563871693863,
"grad_norm": 0.42563286423683167,
"learning_rate": 0.0001817708608035286,
"loss": 0.2735,
"step": 245
},
{
"epoch": 0.28137310073157007,
"grad_norm": 0.7245692610740662,
"learning_rate": 0.00018062346600002699,
"loss": 0.247,
"step": 250
},
{
"epoch": 0.28700056274620145,
"grad_norm": 0.41776588559150696,
"learning_rate": 0.00017944491146751026,
"loss": 0.3202,
"step": 255
},
{
"epoch": 0.2926280247608329,
"grad_norm": 0.46748408675193787,
"learning_rate": 0.0001782356526991702,
"loss": 0.2366,
"step": 260
},
{
"epoch": 0.29825548677546426,
"grad_norm": 0.3817427456378937,
"learning_rate": 0.00017699615705491325,
"loss": 0.2053,
"step": 265
},
{
"epoch": 0.30388294879009564,
"grad_norm": 0.3615594208240509,
"learning_rate": 0.00017572690358073326,
"loss": 0.1965,
"step": 270
},
{
"epoch": 0.3095104108047271,
"grad_norm": 0.48413321375846863,
"learning_rate": 0.00017442838282356727,
"loss": 0.2121,
"step": 275
},
{
"epoch": 0.31513787281935846,
"grad_norm": 0.7311267256736755,
"learning_rate": 0.00017310109664170703,
"loss": 0.3424,
"step": 280
},
{
"epoch": 0.3207653348339899,
"grad_norm": 0.4602237641811371,
"learning_rate": 0.00017174555801083814,
"loss": 0.2099,
"step": 285
},
{
"epoch": 0.32639279684862127,
"grad_norm": 0.5406469702720642,
"learning_rate": 0.00017036229082578307,
"loss": 0.3583,
"step": 290
},
{
"epoch": 0.33202025886325265,
"grad_norm": 0.26748359203338623,
"learning_rate": 0.00016895182969802386,
"loss": 0.2119,
"step": 295
},
{
"epoch": 0.3376477208778841,
"grad_norm": 0.40785279870033264,
"learning_rate": 0.00016751471974908288,
"loss": 0.2793,
"step": 300
},
{
"epoch": 0.34327518289251546,
"grad_norm": 0.5709397196769714,
"learning_rate": 0.00016605151639984187,
"loss": 0.2525,
"step": 305
},
{
"epoch": 0.3489026449071469,
"grad_norm": 0.857624351978302,
"learning_rate": 0.00016456278515588024,
"loss": 0.3339,
"step": 310
},
{
"epoch": 0.3545301069217783,
"grad_norm": 0.33093103766441345,
"learning_rate": 0.00016304910138891597,
"loss": 0.3434,
"step": 315
},
{
"epoch": 0.36015756893640966,
"grad_norm": 0.44036629796028137,
"learning_rate": 0.00016151105011443314,
"loss": 0.229,
"step": 320
},
{
"epoch": 0.3657850309510411,
"grad_norm": 0.3742360770702362,
"learning_rate": 0.00015994922576558263,
"loss": 0.3026,
"step": 325
},
{
"epoch": 0.37141249296567247,
"grad_norm": 0.3356841802597046,
"learning_rate": 0.0001583642319634426,
"loss": 0.2458,
"step": 330
},
{
"epoch": 0.3770399549803039,
"grad_norm": 0.48720481991767883,
"learning_rate": 0.00015675668128372854,
"loss": 0.2653,
"step": 335
},
{
"epoch": 0.3826674169949353,
"grad_norm": 0.6053282022476196,
"learning_rate": 0.00015512719502004197,
"loss": 0.3231,
"step": 340
},
{
"epoch": 0.38829487900956666,
"grad_norm": 0.41004592180252075,
"learning_rate": 0.00015347640294375005,
"loss": 0.3181,
"step": 345
},
{
"epoch": 0.3939223410241981,
"grad_norm": 0.6467001438140869,
"learning_rate": 0.0001518049430605887,
"loss": 0.5039,
"step": 350
},
{
"epoch": 0.3995498030388295,
"grad_norm": 0.4072398543357849,
"learning_rate": 0.0001501134613640832,
"loss": 0.22,
"step": 355
},
{
"epoch": 0.4051772650534609,
"grad_norm": 0.7158648371696472,
"learning_rate": 0.0001484026115858815,
"loss": 0.2842,
"step": 360
},
{
"epoch": 0.4108047270680923,
"grad_norm": 0.5195010304450989,
"learning_rate": 0.00014667305494309727,
"loss": 0.2622,
"step": 365
},
{
"epoch": 0.41643218908272367,
"grad_norm": 0.5050496459007263,
"learning_rate": 0.00014492545988275933,
"loss": 0.3665,
"step": 370
},
{
"epoch": 0.4220596510973551,
"grad_norm": 0.27042141556739807,
"learning_rate": 0.00014316050182346733,
"loss": 0.3115,
"step": 375
},
{
"epoch": 0.4276871131119865,
"grad_norm": 0.35634538531303406,
"learning_rate": 0.00014137886289435295,
"loss": 0.2217,
"step": 380
},
{
"epoch": 0.4333145751266179,
"grad_norm": 0.336165189743042,
"learning_rate": 0.00013958123167144733,
"loss": 0.2716,
"step": 385
},
{
"epoch": 0.4389420371412493,
"grad_norm": 0.5529112219810486,
"learning_rate": 0.00013776830291155703,
"loss": 0.3433,
"step": 390
},
{
"epoch": 0.4445694991558807,
"grad_norm": 0.3461211025714874,
"learning_rate": 0.00013594077728375128,
"loss": 0.2916,
"step": 395
},
{
"epoch": 0.4501969611705121,
"grad_norm": 0.6085760593414307,
"learning_rate": 0.00013409936109856424,
"loss": 0.2442,
"step": 400
},
{
"epoch": 0.4558244231851435,
"grad_norm": 0.601696252822876,
"learning_rate": 0.00013224476603501662,
"loss": 0.2316,
"step": 405
},
{
"epoch": 0.4614518851997749,
"grad_norm": 0.44390204548835754,
"learning_rate": 0.00013037770886556294,
"loss": 0.3902,
"step": 410
},
{
"epoch": 0.4670793472144063,
"grad_norm": 0.3032180368900299,
"learning_rate": 0.00012849891117906978,
"loss": 0.2376,
"step": 415
},
{
"epoch": 0.4727068092290377,
"grad_norm": 1.0338925123214722,
"learning_rate": 0.00012660909910193303,
"loss": 0.2524,
"step": 420
},
{
"epoch": 0.4783342712436691,
"grad_norm": 0.29725944995880127,
"learning_rate": 0.000124709003017441,
"loss": 0.2789,
"step": 425
},
{
"epoch": 0.4839617332583005,
"grad_norm": 0.5633781552314758,
"learning_rate": 0.0001227993572834926,
"loss": 0.2129,
"step": 430
},
{
"epoch": 0.48958919527293193,
"grad_norm": 0.4558543562889099,
"learning_rate": 0.0001208808999487793,
"loss": 0.2721,
"step": 435
},
{
"epoch": 0.4952166572875633,
"grad_norm": 0.41546952724456787,
"learning_rate": 0.00011895437246754074,
"loss": 0.2707,
"step": 440
},
{
"epoch": 0.5008441193021947,
"grad_norm": 0.5292194485664368,
"learning_rate": 0.00011702051941300396,
"loss": 0.2852,
"step": 445
},
{
"epoch": 0.5064715813168261,
"grad_norm": 0.44494009017944336,
"learning_rate": 0.00011508008818961731,
"loss": 0.1887,
"step": 450
},
{
"epoch": 0.5120990433314575,
"grad_norm": 2.672574043273926,
"learning_rate": 0.00011313382874419031,
"loss": 0.2296,
"step": 455
},
{
"epoch": 0.5177265053460889,
"grad_norm": 0.4071643054485321,
"learning_rate": 0.00011118249327605055,
"loss": 0.2921,
"step": 460
},
{
"epoch": 0.5233539673607203,
"grad_norm": 0.41754430532455444,
"learning_rate": 0.00010922683594633021,
"loss": 0.2266,
"step": 465
},
{
"epoch": 0.5289814293753518,
"grad_norm": 0.27264049649238586,
"learning_rate": 0.00010726761258649461,
"loss": 0.239,
"step": 470
},
{
"epoch": 0.5346088913899831,
"grad_norm": 0.5015512704849243,
"learning_rate": 0.00010530558040622472,
"loss": 0.3052,
"step": 475
},
{
"epoch": 0.5402363534046145,
"grad_norm": 0.3031107187271118,
"learning_rate": 0.00010334149770076747,
"loss": 0.3044,
"step": 480
},
{
"epoch": 0.5458638154192459,
"grad_norm": 0.75295090675354,
"learning_rate": 0.00010137612355786618,
"loss": 0.2545,
"step": 485
},
{
"epoch": 0.5514912774338773,
"grad_norm": 0.5242462754249573,
"learning_rate": 9.941021756438488e-05,
"loss": 0.2483,
"step": 490
},
{
"epoch": 0.5571187394485088,
"grad_norm": 0.30885180830955505,
"learning_rate": 9.744453951273968e-05,
"loss": 0.279,
"step": 495
},
{
"epoch": 0.5627462014631401,
"grad_norm": 0.39063045382499695,
"learning_rate": 9.547984910725064e-05,
"loss": 0.211,
"step": 500
},
{
"epoch": 0.5683736634777715,
"grad_norm": 0.3500135838985443,
"learning_rate": 9.35169056705278e-05,
"loss": 0.2148,
"step": 505
},
{
"epoch": 0.5740011254924029,
"grad_norm": 0.34409910440444946,
"learning_rate": 9.155646785000467e-05,
"loss": 0.2509,
"step": 510
},
{
"epoch": 0.5796285875070343,
"grad_norm": 0.3541821539402008,
"learning_rate": 8.959929332473262e-05,
"loss": 0.3381,
"step": 515
},
{
"epoch": 0.5852560495216658,
"grad_norm": 0.8181503415107727,
"learning_rate": 8.764613851254968e-05,
"loss": 0.2808,
"step": 520
},
{
"epoch": 0.5908835115362971,
"grad_norm": 0.36616384983062744,
"learning_rate": 8.569775827773656e-05,
"loss": 0.1762,
"step": 525
},
{
"epoch": 0.5965109735509285,
"grad_norm": 0.5440443754196167,
"learning_rate": 8.375490563927328e-05,
"loss": 0.2909,
"step": 530
},
{
"epoch": 0.6021384355655599,
"grad_norm": 0.4618743360042572,
"learning_rate": 8.181833147980894e-05,
"loss": 0.3326,
"step": 535
},
{
"epoch": 0.6077658975801913,
"grad_norm": 0.604923665523529,
"learning_rate": 7.98887842554572e-05,
"loss": 0.2639,
"step": 540
},
{
"epoch": 0.6133933595948228,
"grad_norm": 0.46064886450767517,
"learning_rate": 7.796700970652932e-05,
"loss": 0.2933,
"step": 545
},
{
"epoch": 0.6190208216094542,
"grad_norm": 0.4644724428653717,
"learning_rate": 7.605375056931712e-05,
"loss": 0.2487,
"step": 550
},
{
"epoch": 0.6246482836240855,
"grad_norm": 0.5088258981704712,
"learning_rate": 7.41497462890369e-05,
"loss": 0.2957,
"step": 555
},
{
"epoch": 0.6302757456387169,
"grad_norm": 0.6130680441856384,
"learning_rate": 7.225573273404513e-05,
"loss": 0.2588,
"step": 560
},
{
"epoch": 0.6359032076533483,
"grad_norm": 0.45168906450271606,
"learning_rate": 7.037244191143661e-05,
"loss": 0.2477,
"step": 565
},
{
"epoch": 0.6415306696679798,
"grad_norm": 0.2619435787200928,
"learning_rate": 6.850060168413518e-05,
"loss": 0.1566,
"step": 570
},
{
"epoch": 0.6471581316826112,
"grad_norm": 0.36049678921699524,
"learning_rate": 6.66409354895857e-05,
"loss": 0.3076,
"step": 575
},
{
"epoch": 0.6527855936972425,
"grad_norm": 0.20779716968536377,
"learning_rate": 6.479416206015679e-05,
"loss": 0.1849,
"step": 580
},
{
"epoch": 0.6584130557118739,
"grad_norm": 0.5497414469718933,
"learning_rate": 6.296099514536167e-05,
"loss": 0.2734,
"step": 585
},
{
"epoch": 0.6640405177265053,
"grad_norm": 0.3398374319076538,
"learning_rate": 6.114214323600504e-05,
"loss": 0.2271,
"step": 590
},
{
"epoch": 0.6696679797411368,
"grad_norm": 0.2531116306781769,
"learning_rate": 5.9338309290362324e-05,
"loss": 0.1864,
"step": 595
},
{
"epoch": 0.6752954417557682,
"grad_norm": 0.38855037093162537,
"learning_rate": 5.7550190462496946e-05,
"loss": 0.2591,
"step": 600
},
{
"epoch": 0.6809229037703995,
"grad_norm": 0.4525040090084076,
"learning_rate": 5.577847783282122e-05,
"loss": 0.3319,
"step": 605
},
{
"epoch": 0.6865503657850309,
"grad_norm": 0.5147562623023987,
"learning_rate": 5.4023856141004236e-05,
"loss": 0.2328,
"step": 610
},
{
"epoch": 0.6921778277996623,
"grad_norm": 0.48170509934425354,
"learning_rate": 5.228700352133071e-05,
"loss": 0.3459,
"step": 615
},
{
"epoch": 0.6978052898142938,
"grad_norm": 0.3423648178577423,
"learning_rate": 5.05685912406123e-05,
"loss": 0.2372,
"step": 620
},
{
"epoch": 0.7034327518289252,
"grad_norm": 0.34861063957214355,
"learning_rate": 4.886928343875341e-05,
"loss": 0.2686,
"step": 625
},
{
"epoch": 0.7090602138435566,
"grad_norm": 0.5629642009735107,
"learning_rate": 4.71897368720714e-05,
"loss": 0.1908,
"step": 630
},
{
"epoch": 0.7146876758581879,
"grad_norm": 0.47687891125679016,
"learning_rate": 4.553060065947013e-05,
"loss": 0.3051,
"step": 635
},
{
"epoch": 0.7203151378728193,
"grad_norm": 0.3600665032863617,
"learning_rate": 4.3892516031565954e-05,
"loss": 0.216,
"step": 640
},
{
"epoch": 0.7259425998874508,
"grad_norm": 0.3238658905029297,
"learning_rate": 4.227611608286147e-05,
"loss": 0.2727,
"step": 645
},
{
"epoch": 0.7315700619020822,
"grad_norm": 0.5159012675285339,
"learning_rate": 4.0682025527064486e-05,
"loss": 0.2408,
"step": 650
},
{
"epoch": 0.7371975239167136,
"grad_norm": 0.5588057041168213,
"learning_rate": 3.911086045564575e-05,
"loss": 0.2084,
"step": 655
},
{
"epoch": 0.7428249859313449,
"grad_norm": 0.33754897117614746,
"learning_rate": 3.756322809972905e-05,
"loss": 0.2405,
"step": 660
},
{
"epoch": 0.7484524479459763,
"grad_norm": 0.3047542870044708,
"learning_rate": 3.6039726595405755e-05,
"loss": 0.2054,
"step": 665
},
{
"epoch": 0.7540799099606078,
"grad_norm": 0.5348085761070251,
"learning_rate": 3.4540944752564406e-05,
"loss": 0.215,
"step": 670
},
{
"epoch": 0.7597073719752392,
"grad_norm": 0.6096937656402588,
"learning_rate": 3.3067461827324755e-05,
"loss": 0.3615,
"step": 675
},
{
"epoch": 0.7653348339898706,
"grad_norm": 0.29212549328804016,
"learning_rate": 3.161984729816415e-05,
"loss": 0.2056,
"step": 680
},
{
"epoch": 0.770962296004502,
"grad_norm": 0.40820997953414917,
"learning_rate": 3.0198660645822985e-05,
"loss": 0.2244,
"step": 685
},
{
"epoch": 0.7765897580191333,
"grad_norm": 0.35913002490997314,
"learning_rate": 2.880445113707384e-05,
"loss": 0.2536,
"step": 690
},
{
"epoch": 0.7822172200337648,
"grad_norm": 0.36664295196533203,
"learning_rate": 2.743775761243843e-05,
"loss": 0.266,
"step": 695
},
{
"epoch": 0.7878446820483962,
"grad_norm": 0.3958778381347656,
"learning_rate": 2.6099108277934103e-05,
"loss": 0.2483,
"step": 700
},
{
"epoch": 0.7934721440630276,
"grad_norm": 0.4310711920261383,
"learning_rate": 2.4789020500930095e-05,
"loss": 0.179,
"step": 705
},
{
"epoch": 0.799099606077659,
"grad_norm": 1.0834096670150757,
"learning_rate": 2.3508000610193258e-05,
"loss": 0.3142,
"step": 710
},
{
"epoch": 0.8047270680922903,
"grad_norm": 1.0267940759658813,
"learning_rate": 2.2256543700199685e-05,
"loss": 0.2546,
"step": 715
},
{
"epoch": 0.8103545301069218,
"grad_norm": 0.2950025498867035,
"learning_rate": 2.1035133439788236e-05,
"loss": 0.151,
"step": 720
},
{
"epoch": 0.8159819921215532,
"grad_norm": 0.3759666085243225,
"learning_rate": 1.9844241885230163e-05,
"loss": 0.2239,
"step": 725
},
{
"epoch": 0.8216094541361846,
"grad_norm": 0.34732529520988464,
"learning_rate": 1.8684329297786453e-05,
"loss": 0.2362,
"step": 730
},
{
"epoch": 0.827236916150816,
"grad_norm": 0.2747817635536194,
"learning_rate": 1.7555843965823992e-05,
"loss": 0.2227,
"step": 735
},
{
"epoch": 0.8328643781654473,
"grad_norm": 0.5855769515037537,
"learning_rate": 1.6459222031558974e-05,
"loss": 0.2677,
"step": 740
},
{
"epoch": 0.8384918401800788,
"grad_norm": 0.49194827675819397,
"learning_rate": 1.5394887322494732e-05,
"loss": 0.2306,
"step": 745
},
{
"epoch": 0.8441193021947102,
"grad_norm": 0.475304514169693,
"learning_rate": 1.4363251187618854e-05,
"loss": 0.3053,
"step": 750
},
{
"epoch": 0.8497467642093416,
"grad_norm": 0.37457942962646484,
"learning_rate": 1.3364712338423214e-05,
"loss": 0.1884,
"step": 755
},
{
"epoch": 0.855374226223973,
"grad_norm": 0.4119722247123718,
"learning_rate": 1.2399656694807971e-05,
"loss": 0.3746,
"step": 760
},
{
"epoch": 0.8610016882386043,
"grad_norm": 0.5167866945266724,
"learning_rate": 1.1468457235929597e-05,
"loss": 0.2426,
"step": 765
},
{
"epoch": 0.8666291502532358,
"grad_norm": 0.41178828477859497,
"learning_rate": 1.0571473856050107e-05,
"loss": 0.2886,
"step": 770
},
{
"epoch": 0.8722566122678672,
"grad_norm": 0.43694284558296204,
"learning_rate": 9.709053225443487e-06,
"loss": 0.2708,
"step": 775
},
{
"epoch": 0.8778840742824986,
"grad_norm": 0.22772341966629028,
"learning_rate": 8.881528656412963e-06,
"loss": 0.1469,
"step": 780
},
{
"epoch": 0.88351153629713,
"grad_norm": 0.38090091943740845,
"learning_rate": 8.08921997447094e-06,
"loss": 0.3346,
"step": 785
},
{
"epoch": 0.8891389983117614,
"grad_norm": 0.3263999819755554,
"learning_rate": 7.332433394731331e-06,
"loss": 0.2386,
"step": 790
},
{
"epoch": 0.8947664603263928,
"grad_norm": 0.30097389221191406,
"learning_rate": 6.611461403562147e-06,
"loss": 0.1905,
"step": 795
},
{
"epoch": 0.9003939223410242,
"grad_norm": 0.41428399085998535,
"learning_rate": 5.92658264554401e-06,
"loss": 0.2399,
"step": 800
},
{
"epoch": 0.9060213843556556,
"grad_norm": 0.4272648096084595,
"learning_rate": 5.278061815778313e-06,
"loss": 0.195,
"step": 805
},
{
"epoch": 0.911648846370287,
"grad_norm": 0.44488322734832764,
"learning_rate": 4.666149557586697e-06,
"loss": 0.2342,
"step": 810
},
{
"epoch": 0.9172763083849184,
"grad_norm": 0.3607059717178345,
"learning_rate": 4.091082365641085e-06,
"loss": 0.2702,
"step": 815
},
{
"epoch": 0.9229037703995498,
"grad_norm": 0.30268949270248413,
"learning_rate": 3.5530824945623542e-06,
"loss": 0.2509,
"step": 820
},
{
"epoch": 0.9285312324141812,
"grad_norm": 0.478462815284729,
"learning_rate": 3.0523578730221713e-06,
"loss": 0.2529,
"step": 825
},
{
"epoch": 0.9341586944288126,
"grad_norm": 0.36823543906211853,
"learning_rate": 2.589102023381895e-06,
"loss": 0.2497,
"step": 830
},
{
"epoch": 0.939786156443444,
"grad_norm": 0.6508424878120422,
"learning_rate": 2.1634939868990235e-06,
"loss": 0.4272,
"step": 835
},
{
"epoch": 0.9454136184580754,
"grad_norm": 0.4297642409801483,
"learning_rate": 1.7756982545306443e-06,
"loss": 0.2261,
"step": 840
},
{
"epoch": 0.9510410804727069,
"grad_norm": 0.2725857198238373,
"learning_rate": 1.4258647033601024e-06,
"loss": 0.1526,
"step": 845
},
{
"epoch": 0.9566685424873382,
"grad_norm": 0.366515189409256,
"learning_rate": 1.1141285386718437e-06,
"loss": 0.2365,
"step": 850
},
{
"epoch": 0.9622960045019696,
"grad_norm": 0.4519096910953522,
"learning_rate": 8.406102416967043e-07,
"loss": 0.2357,
"step": 855
},
{
"epoch": 0.967923466516601,
"grad_norm": 0.36480775475502014,
"learning_rate": 6.054155230476699e-07,
"loss": 0.2632,
"step": 860
},
{
"epoch": 0.9735509285312324,
"grad_norm": 0.29836106300354004,
"learning_rate": 4.0863528186445564e-07,
"loss": 0.2645,
"step": 865
},
{
"epoch": 0.9791783905458639,
"grad_norm": 0.33390042185783386,
"learning_rate": 2.50345570682331e-07,
"loss": 0.2836,
"step": 870
},
{
"epoch": 0.9848058525604952,
"grad_norm": 0.5138782262802124,
"learning_rate": 1.3060756603897605e-07,
"loss": 0.2035,
"step": 875
},
{
"epoch": 0.9904333145751266,
"grad_norm": 0.47627323865890503,
"learning_rate": 4.946754483071692e-08,
"loss": 0.2365,
"step": 880
},
{
"epoch": 0.996060776589758,
"grad_norm": 0.2622200548648834,
"learning_rate": 6.95686642719906e-09,
"loss": 0.1815,
"step": 885
},
{
"epoch": 0.9994372537985369,
"step": 888,
"total_flos": 8.639715708407644e+17,
"train_loss": 0.2980908542066007,
"train_runtime": 5659.1413,
"train_samples_per_second": 2.512,
"train_steps_per_second": 0.157
}
],
"logging_steps": 5,
"max_steps": 888,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 8.639715708407644e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}