{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 8.0,
"eval_steps": 500,
"global_step": 32000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.025,
"grad_norm": 0.15203669667243958,
"learning_rate": 0.000499987952239832,
"loss": 1.1745,
"step": 100
},
{
"epoch": 0.05,
"grad_norm": 0.14502686262130737,
"learning_rate": 0.0004999518101205162,
"loss": 0.907,
"step": 200
},
{
"epoch": 0.075,
"grad_norm": 0.1642378270626068,
"learning_rate": 0.0004998915771255053,
"loss": 0.8858,
"step": 300
},
{
"epoch": 0.1,
"grad_norm": 0.15131665766239166,
"learning_rate": 0.0004998072590601808,
"loss": 0.8573,
"step": 400
},
{
"epoch": 0.125,
"grad_norm": 0.17200812697410583,
"learning_rate": 0.0004996988640512931,
"loss": 0.8833,
"step": 500
},
{
"epoch": 0.15,
"grad_norm": 0.14832615852355957,
"learning_rate": 0.000499566402546179,
"loss": 0.8689,
"step": 600
},
{
"epoch": 0.175,
"grad_norm": 0.18304194509983063,
"learning_rate": 0.0004994098873117539,
"loss": 0.8837,
"step": 700
},
{
"epoch": 0.2,
"grad_norm": 0.16831472516059875,
"learning_rate": 0.000499229333433282,
"loss": 0.8673,
"step": 800
},
{
"epoch": 0.225,
"grad_norm": 0.17680132389068604,
"learning_rate": 0.0004990247583129218,
"loss": 0.8764,
"step": 900
},
{
"epoch": 0.25,
"grad_norm": 0.15322180092334747,
"learning_rate": 0.0004987961816680492,
"loss": 0.8304,
"step": 1000
},
{
"epoch": 0.275,
"grad_norm": 0.16825291514396667,
"learning_rate": 0.0004985436255293571,
"loss": 0.8547,
"step": 1100
},
{
"epoch": 0.3,
"grad_norm": 0.18319116532802582,
"learning_rate": 0.0004982671142387316,
"loss": 0.849,
"step": 1200
},
{
"epoch": 0.325,
"grad_norm": 0.18427041172981262,
"learning_rate": 0.0004979666744469065,
"loss": 0.8537,
"step": 1300
},
{
"epoch": 0.35,
"grad_norm": 0.1925463080406189,
"learning_rate": 0.0004976423351108943,
"loss": 0.8416,
"step": 1400
},
{
"epoch": 0.375,
"grad_norm": 0.15338857471942902,
"learning_rate": 0.0004972941274911952,
"loss": 0.8214,
"step": 1500
},
{
"epoch": 0.4,
"grad_norm": 0.16722452640533447,
"learning_rate": 0.0004969220851487844,
"loss": 0.8348,
"step": 1600
},
{
"epoch": 0.425,
"grad_norm": 0.1711965799331665,
"learning_rate": 0.0004965262439418772,
"loss": 0.846,
"step": 1700
},
{
"epoch": 0.45,
"grad_norm": 0.19129395484924316,
"learning_rate": 0.0004961066420224729,
"loss": 0.852,
"step": 1800
},
{
"epoch": 0.475,
"grad_norm": 0.17632591724395752,
"learning_rate": 0.000495663319832678,
"loss": 0.8424,
"step": 1900
},
{
"epoch": 0.5,
"grad_norm": 0.16157850623130798,
"learning_rate": 0.0004951963201008077,
"loss": 0.8428,
"step": 2000
},
{
"epoch": 0.525,
"grad_norm": 0.20878523588180542,
"learning_rate": 0.0004947056878372681,
"loss": 0.8362,
"step": 2100
},
{
"epoch": 0.55,
"grad_norm": 0.22794917225837708,
"learning_rate": 0.0004941914703302181,
"loss": 0.829,
"step": 2200
},
{
"epoch": 0.575,
"grad_norm": 0.21379908919334412,
"learning_rate": 0.0004936537171410112,
"loss": 0.8569,
"step": 2300
},
{
"epoch": 0.6,
"grad_norm": 0.18703673779964447,
"learning_rate": 0.0004930924800994192,
"loss": 0.829,
"step": 2400
},
{
"epoch": 0.625,
"grad_norm": 0.1804085075855255,
"learning_rate": 0.000492507813298636,
"loss": 0.8453,
"step": 2500
},
{
"epoch": 0.65,
"grad_norm": 0.18967971205711365,
"learning_rate": 0.0004918997730900649,
"loss": 0.8465,
"step": 2600
},
{
"epoch": 0.675,
"grad_norm": 0.1845272034406662,
"learning_rate": 0.0004912684180778869,
"loss": 0.829,
"step": 2700
},
{
"epoch": 0.7,
"grad_norm": 0.21101725101470947,
"learning_rate": 0.0004906138091134118,
"loss": 0.8145,
"step": 2800
},
{
"epoch": 0.725,
"grad_norm": 0.21849285066127777,
"learning_rate": 0.0004899360092892143,
"loss": 0.8293,
"step": 2900
},
{
"epoch": 0.75,
"grad_norm": 0.2142125815153122,
"learning_rate": 0.0004892350839330522,
"loss": 0.8229,
"step": 3000
},
{
"epoch": 0.775,
"grad_norm": 0.24221359193325043,
"learning_rate": 0.0004885111006015701,
"loss": 0.8163,
"step": 3100
},
{
"epoch": 0.8,
"grad_norm": 0.1932164877653122,
"learning_rate": 0.0004877641290737884,
"loss": 0.8137,
"step": 3200
},
{
"epoch": 0.825,
"grad_norm": 0.1931457221508026,
"learning_rate": 0.0004869942413443776,
"loss": 0.8096,
"step": 3300
},
{
"epoch": 0.85,
"grad_norm": 0.20192202925682068,
"learning_rate": 0.00048620151161671955,
"loss": 0.8232,
"step": 3400
},
{
"epoch": 0.875,
"grad_norm": 0.21490222215652466,
"learning_rate": 0.0004853860162957552,
"loss": 0.8225,
"step": 3500
},
{
"epoch": 0.9,
"grad_norm": 0.2179287225008011,
"learning_rate": 0.0004845478339806211,
"loss": 0.8056,
"step": 3600
},
{
"epoch": 0.925,
"grad_norm": 0.24655170738697052,
"learning_rate": 0.0004836870454570731,
"loss": 0.8215,
"step": 3700
},
{
"epoch": 0.95,
"grad_norm": 0.20784540474414825,
"learning_rate": 0.00048280373368970086,
"loss": 0.8029,
"step": 3800
},
{
"epoch": 0.975,
"grad_norm": 0.22836102545261383,
"learning_rate": 0.000481897983813931,
"loss": 0.8205,
"step": 3900
},
{
"epoch": 1.0,
"grad_norm": 0.2156483232975006,
"learning_rate": 0.0004809698831278217,
"loss": 0.7878,
"step": 4000
},
{
"epoch": 1.0,
"eval_loss": 0.8105162382125854,
"eval_runtime": 184.0402,
"eval_samples_per_second": 21.734,
"eval_steps_per_second": 5.434,
"step": 4000
},
{
"epoch": 1.025,
"grad_norm": 0.2116573601961136,
"learning_rate": 0.00048001952108364876,
"loss": 0.7884,
"step": 4100
},
{
"epoch": 1.05,
"grad_norm": 0.21137504279613495,
"learning_rate": 0.00047904698927928404,
"loss": 0.787,
"step": 4200
},
{
"epoch": 1.075,
"grad_norm": 0.21804390847682953,
"learning_rate": 0.0004780523814493669,
"loss": 0.7977,
"step": 4300
},
{
"epoch": 1.1,
"grad_norm": 0.21372774243354797,
"learning_rate": 0.00047703579345627036,
"loss": 0.7866,
"step": 4400
},
{
"epoch": 1.125,
"grad_norm": 0.24784786999225616,
"learning_rate": 0.0004759973232808609,
"loss": 0.7764,
"step": 4500
},
{
"epoch": 1.15,
"grad_norm": 0.2134639024734497,
"learning_rate": 0.0004749370710130554,
"loss": 0.8029,
"step": 4600
},
{
"epoch": 1.175,
"grad_norm": 0.21301183104515076,
"learning_rate": 0.0004738551388421742,
"loss": 0.7936,
"step": 4700
},
{
"epoch": 1.2,
"grad_norm": 0.21645288169384003,
"learning_rate": 0.00047275163104709196,
"loss": 0.7835,
"step": 4800
},
{
"epoch": 1.225,
"grad_norm": 0.1981271356344223,
"learning_rate": 0.00047162665398618666,
"loss": 0.7951,
"step": 4900
},
{
"epoch": 1.25,
"grad_norm": 0.24540849030017853,
"learning_rate": 0.00047048031608708875,
"loss": 0.7797,
"step": 5000
},
{
"epoch": 1.275,
"grad_norm": 0.22519993782043457,
"learning_rate": 0.00046931272783623106,
"loss": 0.789,
"step": 5100
},
{
"epoch": 1.3,
"grad_norm": 0.2160399854183197,
"learning_rate": 0.0004681240017681993,
"loss": 0.8028,
"step": 5200
},
{
"epoch": 1.325,
"grad_norm": 0.20990680158138275,
"learning_rate": 0.00046691425245488607,
"loss": 0.7795,
"step": 5300
},
{
"epoch": 1.35,
"grad_norm": 0.23152625560760498,
"learning_rate": 0.00046568359649444796,
"loss": 0.7862,
"step": 5400
},
{
"epoch": 1.375,
"grad_norm": 0.27092477679252625,
"learning_rate": 0.00046443215250006805,
"loss": 0.7895,
"step": 5500
},
{
"epoch": 1.4,
"grad_norm": 0.2427772879600525,
"learning_rate": 0.00046316004108852305,
"loss": 0.8032,
"step": 5600
},
{
"epoch": 1.425,
"grad_norm": 0.24519051611423492,
"learning_rate": 0.0004618673848685586,
"loss": 0.7728,
"step": 5700
},
{
"epoch": 1.45,
"grad_norm": 0.2481563240289688,
"learning_rate": 0.0004605543084290716,
"loss": 0.7886,
"step": 5800
},
{
"epoch": 1.475,
"grad_norm": 0.24766775965690613,
"learning_rate": 0.0004592209383271023,
"loss": 0.7931,
"step": 5900
},
{
"epoch": 1.5,
"grad_norm": 0.23152729868888855,
"learning_rate": 0.00045786740307563633,
"loss": 0.7922,
"step": 6000
},
{
"epoch": 1.525,
"grad_norm": 0.2345590442419052,
"learning_rate": 0.0004564938331312183,
"loss": 0.767,
"step": 6100
},
{
"epoch": 1.55,
"grad_norm": 0.20987170934677124,
"learning_rate": 0.0004551003608813784,
"loss": 0.7873,
"step": 6200
},
{
"epoch": 1.575,
"grad_norm": 0.2418334037065506,
"learning_rate": 0.00045368712063187237,
"loss": 0.7911,
"step": 6300
},
{
"epoch": 1.6,
"grad_norm": 0.2788698673248291,
"learning_rate": 0.0004522542485937369,
"loss": 0.7535,
"step": 6400
},
{
"epoch": 1.625,
"grad_norm": 0.243035227060318,
"learning_rate": 0.0004508018828701612,
"loss": 0.773,
"step": 6500
},
{
"epoch": 1.65,
"grad_norm": 0.2505854070186615,
"learning_rate": 0.0004493301634431768,
"loss": 0.7726,
"step": 6600
},
{
"epoch": 1.675,
"grad_norm": 0.24240297079086304,
"learning_rate": 0.00044783923216016507,
"loss": 0.7776,
"step": 6700
},
{
"epoch": 1.7,
"grad_norm": 0.2779521048069,
"learning_rate": 0.0004463292327201862,
"loss": 0.7712,
"step": 6800
},
{
"epoch": 1.725,
"grad_norm": 0.25853052735328674,
"learning_rate": 0.00044480031066012916,
"loss": 0.7953,
"step": 6900
},
{
"epoch": 1.75,
"grad_norm": 0.2403198778629303,
"learning_rate": 0.0004432526133406842,
"loss": 0.7722,
"step": 7000
},
{
"epoch": 1.775,
"grad_norm": 0.2707219421863556,
"learning_rate": 0.00044168628993214036,
"loss": 0.7526,
"step": 7100
},
{
"epoch": 1.8,
"grad_norm": 0.24813227355480194,
"learning_rate": 0.0004401014914000078,
"loss": 0.7582,
"step": 7200
},
{
"epoch": 1.825,
"grad_norm": 0.24337643384933472,
"learning_rate": 0.00043849837049046735,
"loss": 0.7777,
"step": 7300
},
{
"epoch": 1.85,
"grad_norm": 0.24402017891407013,
"learning_rate": 0.00043687708171564923,
"loss": 0.7738,
"step": 7400
},
{
"epoch": 1.875,
"grad_norm": 0.2371804267168045,
"learning_rate": 0.0004352377813387398,
"loss": 0.7778,
"step": 7500
},
{
"epoch": 1.9,
"grad_norm": 0.2974371016025543,
"learning_rate": 0.0004335806273589214,
"loss": 0.7907,
"step": 7600
},
{
"epoch": 1.925,
"grad_norm": 0.2702704966068268,
"learning_rate": 0.00043190577949614375,
"loss": 0.784,
"step": 7700
},
{
"epoch": 1.95,
"grad_norm": 0.24683596193790436,
"learning_rate": 0.0004302133991757297,
"loss": 0.7663,
"step": 7800
},
{
"epoch": 1.975,
"grad_norm": 0.26394063234329224,
"learning_rate": 0.00042850364951281707,
"loss": 0.7881,
"step": 7900
},
{
"epoch": 2.0,
"grad_norm": 0.2807358205318451,
"learning_rate": 0.00042677669529663686,
"loss": 0.7877,
"step": 8000
},
{
"epoch": 2.0,
"eval_loss": 0.7847200036048889,
"eval_runtime": 158.0469,
"eval_samples_per_second": 25.309,
"eval_steps_per_second": 6.327,
"step": 8000
},
{
"epoch": 2.025,
"grad_norm": 0.26467156410217285,
"learning_rate": 0.0004250327029746309,
"loss": 0.7632,
"step": 8100
},
{
"epoch": 2.05,
"grad_norm": 0.27014681696891785,
"learning_rate": 0.000423271840636409,
"loss": 0.7493,
"step": 8200
},
{
"epoch": 2.075,
"grad_norm": 0.27495408058166504,
"learning_rate": 0.00042149427799754817,
"loss": 0.7556,
"step": 8300
},
{
"epoch": 2.1,
"grad_norm": 0.2690233886241913,
"learning_rate": 0.00041970018638323546,
"loss": 0.7294,
"step": 8400
},
{
"epoch": 2.125,
"grad_norm": 0.25307732820510864,
"learning_rate": 0.00041788973871175465,
"loss": 0.7578,
"step": 8500
},
{
"epoch": 2.15,
"grad_norm": 0.2873128056526184,
"learning_rate": 0.00041606310947782046,
"loss": 0.7526,
"step": 8600
},
{
"epoch": 2.175,
"grad_norm": 0.3025578260421753,
"learning_rate": 0.00041422047473576033,
"loss": 0.7713,
"step": 8700
},
{
"epoch": 2.2,
"grad_norm": 0.2654635012149811,
"learning_rate": 0.0004123620120825459,
"loss": 0.7644,
"step": 8800
},
{
"epoch": 2.225,
"grad_norm": 0.2813340425491333,
"learning_rate": 0.00041048790064067577,
"loss": 0.7572,
"step": 8900
},
{
"epoch": 2.25,
"grad_norm": 0.240594744682312,
"learning_rate": 0.0004085983210409114,
"loss": 0.7511,
"step": 9000
},
{
"epoch": 2.275,
"grad_norm": 0.27496930956840515,
"learning_rate": 0.0004066934554048674,
"loss": 0.7511,
"step": 9100
},
{
"epoch": 2.3,
"grad_norm": 0.2850758135318756,
"learning_rate": 0.00040477348732745853,
"loss": 0.7536,
"step": 9200
},
{
"epoch": 2.325,
"grad_norm": 0.29253271222114563,
"learning_rate": 0.0004028386018592041,
"loss": 0.749,
"step": 9300
},
{
"epoch": 2.35,
"grad_norm": 0.3089846074581146,
"learning_rate": 0.0004008889854883929,
"loss": 0.7577,
"step": 9400
},
{
"epoch": 2.375,
"grad_norm": 0.25122740864753723,
"learning_rate": 0.0003989248261231084,
"loss": 0.7342,
"step": 9500
},
{
"epoch": 2.4,
"grad_norm": 0.33366382122039795,
"learning_rate": 0.0003969463130731183,
"loss": 0.7439,
"step": 9600
},
{
"epoch": 2.425,
"grad_norm": 0.25869670510292053,
"learning_rate": 0.00039495363703162843,
"loss": 0.7484,
"step": 9700
},
{
"epoch": 2.45,
"grad_norm": 0.27346816658973694,
"learning_rate": 0.000392946990056903,
"loss": 0.7844,
"step": 9800
},
{
"epoch": 2.475,
"grad_norm": 0.27572157979011536,
"learning_rate": 0.00039092656555375416,
"loss": 0.7656,
"step": 9900
},
{
"epoch": 2.5,
"grad_norm": 0.30408018827438354,
"learning_rate": 0.00038889255825490053,
"loss": 0.7482,
"step": 10000
},
{
"epoch": 2.525,
"grad_norm": 0.28552907705307007,
"learning_rate": 0.0003868451642021992,
"loss": 0.7464,
"step": 10100
},
{
"epoch": 2.55,
"grad_norm": 0.2961609661579132,
"learning_rate": 0.0003847845807277501,
"loss": 0.7447,
"step": 10200
},
{
"epoch": 2.575,
"grad_norm": 0.2863265573978424,
"learning_rate": 0.0003827110064348773,
"loss": 0.7538,
"step": 10300
},
{
"epoch": 2.6,
"grad_norm": 0.26066234707832336,
"learning_rate": 0.0003806246411789872,
"loss": 0.7476,
"step": 10400
},
{
"epoch": 2.625,
"grad_norm": 0.2573792338371277,
"learning_rate": 0.0003785256860483054,
"loss": 0.724,
"step": 10500
},
{
"epoch": 2.65,
"grad_norm": 0.28464919328689575,
"learning_rate": 0.0003764143433444962,
"loss": 0.7395,
"step": 10600
},
{
"epoch": 2.675,
"grad_norm": 0.26662376523017883,
"learning_rate": 0.0003742908165631636,
"loss": 0.7524,
"step": 10700
},
{
"epoch": 2.7,
"grad_norm": 0.25927239656448364,
"learning_rate": 0.0003721553103742388,
"loss": 0.776,
"step": 10800
},
{
"epoch": 2.725,
"grad_norm": 0.2786635160446167,
"learning_rate": 0.0003700080306022528,
"loss": 0.7392,
"step": 10900
},
{
"epoch": 2.75,
"grad_norm": 0.26539376378059387,
"learning_rate": 0.0003678491842064995,
"loss": 0.7495,
"step": 11000
},
{
"epoch": 2.775,
"grad_norm": 0.23436130583286285,
"learning_rate": 0.00036567897926108756,
"loss": 0.7241,
"step": 11100
},
{
"epoch": 2.8,
"grad_norm": 0.28717827796936035,
"learning_rate": 0.00036349762493488667,
"loss": 0.7249,
"step": 11200
},
{
"epoch": 2.825,
"grad_norm": 0.2903422713279724,
"learning_rate": 0.0003613053314713671,
"loss": 0.7514,
"step": 11300
},
{
"epoch": 2.85,
"grad_norm": 0.3125324845314026,
"learning_rate": 0.0003591023101683355,
"loss": 0.7689,
"step": 11400
},
{
"epoch": 2.875,
"grad_norm": 0.26884034276008606,
"learning_rate": 0.0003568887733575705,
"loss": 0.7708,
"step": 11500
},
{
"epoch": 2.9,
"grad_norm": 0.2495446503162384,
"learning_rate": 0.00035466493438435703,
"loss": 0.737,
"step": 11600
},
{
"epoch": 2.925,
"grad_norm": 0.2772550582885742,
"learning_rate": 0.0003524310075869239,
"loss": 0.7618,
"step": 11700
},
{
"epoch": 2.95,
"grad_norm": 0.23050114512443542,
"learning_rate": 0.0003501872082757852,
"loss": 0.7492,
"step": 11800
},
{
"epoch": 2.975,
"grad_norm": 0.3204950988292694,
"learning_rate": 0.000347933752712989,
"loss": 0.7456,
"step": 11900
},
{
"epoch": 3.0,
"grad_norm": 0.26798272132873535,
"learning_rate": 0.0003456708580912725,
"loss": 0.7671,
"step": 12000
},
{
"epoch": 3.0,
"eval_loss": 0.770311176776886,
"eval_runtime": 157.8536,
"eval_samples_per_second": 25.34,
"eval_steps_per_second": 6.335,
"step": 12000
},
{
"epoch": 3.025,
"grad_norm": 0.2797103822231293,
"learning_rate": 0.0003433987425131291,
"loss": 0.7356,
"step": 12100
},
{
"epoch": 3.05,
"grad_norm": 0.24729490280151367,
"learning_rate": 0.0003411176249697875,
"loss": 0.7274,
"step": 12200
},
{
"epoch": 3.075,
"grad_norm": 0.24234917759895325,
"learning_rate": 0.00033882772532010404,
"loss": 0.7187,
"step": 12300
},
{
"epoch": 3.1,
"grad_norm": 0.3106192648410797,
"learning_rate": 0.0003365292642693733,
"loss": 0.7131,
"step": 12400
},
{
"epoch": 3.125,
"grad_norm": 0.3047131597995758,
"learning_rate": 0.00033422246334805503,
"loss": 0.731,
"step": 12500
},
{
"epoch": 3.15,
"grad_norm": 0.2762167751789093,
"learning_rate": 0.0003319075448904234,
"loss": 0.7399,
"step": 12600
},
{
"epoch": 3.175,
"grad_norm": 0.32366228103637695,
"learning_rate": 0.00032958473201313745,
"loss": 0.7387,
"step": 12700
},
{
"epoch": 3.2,
"grad_norm": 0.2838393747806549,
"learning_rate": 0.00032725424859373687,
"loss": 0.7297,
"step": 12800
},
{
"epoch": 3.225,
"grad_norm": 0.3418114483356476,
"learning_rate": 0.00032491631924906416,
"loss": 0.7322,
"step": 12900
},
{
"epoch": 3.25,
"grad_norm": 0.31752169132232666,
"learning_rate": 0.00032257116931361555,
"loss": 0.7466,
"step": 13000
},
{
"epoch": 3.275,
"grad_norm": 0.28176289796829224,
"learning_rate": 0.00032021902481782304,
"loss": 0.7256,
"step": 13100
},
{
"epoch": 3.3,
"grad_norm": 0.3140346109867096,
"learning_rate": 0.00031786011246626855,
"loss": 0.7275,
"step": 13200
},
{
"epoch": 3.325,
"grad_norm": 0.3124103248119354,
"learning_rate": 0.0003154946596158343,
"loss": 0.7548,
"step": 13300
},
{
"epoch": 3.35,
"grad_norm": 0.27317100763320923,
"learning_rate": 0.0003131228942537895,
"loss": 0.7437,
"step": 13400
},
{
"epoch": 3.375,
"grad_norm": 0.3227018713951111,
"learning_rate": 0.000310745044975816,
"loss": 0.7265,
"step": 13500
},
{
"epoch": 3.4,
"grad_norm": 0.2868232727050781,
"learning_rate": 0.0003083613409639764,
"loss": 0.7197,
"step": 13600
},
{
"epoch": 3.425,
"grad_norm": 0.29342934489250183,
"learning_rate": 0.00030597201196462466,
"loss": 0.7073,
"step": 13700
},
{
"epoch": 3.45,
"grad_norm": 0.3376121520996094,
"learning_rate": 0.00030357728826626266,
"loss": 0.7346,
"step": 13800
},
{
"epoch": 3.475,
"grad_norm": 0.2940613031387329,
"learning_rate": 0.00030117740067734495,
"loss": 0.7463,
"step": 13900
},
{
"epoch": 3.5,
"grad_norm": 0.2969967722892761,
"learning_rate": 0.0002987725805040321,
"loss": 0.7174,
"step": 14000
},
{
"epoch": 3.525,
"grad_norm": 0.2882630527019501,
"learning_rate": 0.0002963630595278977,
"loss": 0.726,
"step": 14100
},
{
"epoch": 3.55,
"grad_norm": 0.3352701663970947,
"learning_rate": 0.0002939490699835887,
"loss": 0.7177,
"step": 14200
},
{
"epoch": 3.575,
"grad_norm": 0.28441768884658813,
"learning_rate": 0.00029153084453644135,
"loss": 0.7385,
"step": 14300
},
{
"epoch": 3.6,
"grad_norm": 0.2801556885242462,
"learning_rate": 0.00028910861626005774,
"loss": 0.7242,
"step": 14400
},
{
"epoch": 3.625,
"grad_norm": 0.3156888782978058,
"learning_rate": 0.00028668261861384045,
"loss": 0.7207,
"step": 14500
},
{
"epoch": 3.65,
"grad_norm": 0.3086392879486084,
"learning_rate": 0.00028425308542049207,
"loss": 0.7337,
"step": 14600
},
{
"epoch": 3.675,
"grad_norm": 0.312292218208313,
"learning_rate": 0.0002818202508434783,
"loss": 0.7469,
"step": 14700
},
{
"epoch": 3.7,
"grad_norm": 0.32004839181900024,
"learning_rate": 0.00027938434936445943,
"loss": 0.7297,
"step": 14800
},
{
"epoch": 3.725,
"grad_norm": 0.2992401719093323,
"learning_rate": 0.00027694561576068985,
"loss": 0.7379,
"step": 14900
},
{
"epoch": 3.75,
"grad_norm": 0.31298381090164185,
"learning_rate": 0.0002745042850823902,
"loss": 0.742,
"step": 15000
},
{
"epoch": 3.775,
"grad_norm": 0.3190864622592926,
"learning_rate": 0.00027206059263009243,
"loss": 0.715,
"step": 15100
},
{
"epoch": 3.8,
"grad_norm": 0.30321431159973145,
"learning_rate": 0.00026961477393196127,
"loss": 0.7085,
"step": 15200
},
{
"epoch": 3.825,
"grad_norm": 0.31598585844039917,
"learning_rate": 0.0002671670647210934,
"loss": 0.7456,
"step": 15300
},
{
"epoch": 3.85,
"grad_norm": 0.3217307925224304,
"learning_rate": 0.00026471770091279724,
"loss": 0.7221,
"step": 15400
},
{
"epoch": 3.875,
"grad_norm": 0.2786950469017029,
"learning_rate": 0.00026226691858185456,
"loss": 0.7297,
"step": 15500
},
{
"epoch": 3.9,
"grad_norm": 0.29478856921195984,
"learning_rate": 0.00025981495393976716,
"loss": 0.7311,
"step": 15600
},
{
"epoch": 3.925,
"grad_norm": 0.32220613956451416,
"learning_rate": 0.00025736204331199084,
"loss": 0.7376,
"step": 15700
},
{
"epoch": 3.95,
"grad_norm": 0.296522319316864,
"learning_rate": 0.00025490842311515704,
"loss": 0.7263,
"step": 15800
},
{
"epoch": 3.975,
"grad_norm": 0.32826748490333557,
"learning_rate": 0.0002524543298342875,
"loss": 0.7393,
"step": 15900
},
{
"epoch": 4.0,
"grad_norm": 0.3822040259838104,
"learning_rate": 0.00025,
"loss": 0.7233,
"step": 16000
},
{
"epoch": 4.0,
"eval_loss": 0.7630051970481873,
"eval_runtime": 157.7404,
"eval_samples_per_second": 25.358,
"eval_steps_per_second": 6.34,
"step": 16000
},
{
"epoch": 4.025,
"grad_norm": 0.33770787715911865,
"learning_rate": 0.0002475456701657126,
"loss": 0.7406,
"step": 16100
},
{
"epoch": 4.05,
"grad_norm": 0.3054458200931549,
"learning_rate": 0.00024509157688484297,
"loss": 0.7034,
"step": 16200
},
{
"epoch": 4.075,
"grad_norm": 0.3073260188102722,
"learning_rate": 0.0002426379566880092,
"loss": 0.7141,
"step": 16300
},
{
"epoch": 4.1,
"grad_norm": 0.3053102195262909,
"learning_rate": 0.00024018504606023293,
"loss": 0.6981,
"step": 16400
},
{
"epoch": 4.125,
"grad_norm": 0.33330512046813965,
"learning_rate": 0.0002377330814181455,
"loss": 0.7122,
"step": 16500
},
{
"epoch": 4.15,
"grad_norm": 0.3163035213947296,
"learning_rate": 0.00023528229908720272,
"loss": 0.7145,
"step": 16600
},
{
"epoch": 4.175,
"grad_norm": 0.3451402485370636,
"learning_rate": 0.00023283293527890658,
"loss": 0.7039,
"step": 16700
},
{
"epoch": 4.2,
"grad_norm": 0.3371511399745941,
"learning_rate": 0.0002303852260680388,
"loss": 0.7078,
"step": 16800
},
{
"epoch": 4.225,
"grad_norm": 0.29235777258872986,
"learning_rate": 0.00022793940736990766,
"loss": 0.6777,
"step": 16900
},
{
"epoch": 4.25,
"grad_norm": 0.3207673728466034,
"learning_rate": 0.00022549571491760985,
"loss": 0.7235,
"step": 17000
},
{
"epoch": 4.275,
"grad_norm": 0.2674269378185272,
"learning_rate": 0.00022305438423931017,
"loss": 0.693,
"step": 17100
},
{
"epoch": 4.3,
"grad_norm": 0.3360178768634796,
"learning_rate": 0.00022061565063554063,
"loss": 0.7113,
"step": 17200
},
{
"epoch": 4.325,
"grad_norm": 0.3393208682537079,
"learning_rate": 0.00021817974915652172,
"loss": 0.7128,
"step": 17300
},
{
"epoch": 4.35,
"grad_norm": 0.3478228747844696,
"learning_rate": 0.00021574691457950805,
"loss": 0.7118,
"step": 17400
},
{
"epoch": 4.375,
"grad_norm": 0.3498687446117401,
"learning_rate": 0.00021331738138615958,
"loss": 0.7389,
"step": 17500
},
{
"epoch": 4.4,
"grad_norm": 0.2958277463912964,
"learning_rate": 0.00021089138373994224,
"loss": 0.723,
"step": 17600
},
{
"epoch": 4.425,
"grad_norm": 0.3038991689682007,
"learning_rate": 0.0002084691554635587,
"loss": 0.7188,
"step": 17700
},
{
"epoch": 4.45,
"grad_norm": 0.3416539132595062,
"learning_rate": 0.00020605093001641137,
"loss": 0.7004,
"step": 17800
},
{
"epoch": 4.475,
"grad_norm": 0.32150790095329285,
"learning_rate": 0.00020363694047210228,
"loss": 0.7213,
"step": 17900
},
{
"epoch": 4.5,
"grad_norm": 0.32991406321525574,
"learning_rate": 0.00020122741949596797,
"loss": 0.7045,
"step": 18000
},
{
"epoch": 4.525,
"grad_norm": 0.315838098526001,
"learning_rate": 0.00019882259932265512,
"loss": 0.7382,
"step": 18100
},
{
"epoch": 4.55,
"grad_norm": 0.35709577798843384,
"learning_rate": 0.00019642271173373735,
"loss": 0.6946,
"step": 18200
},
{
"epoch": 4.575,
"grad_norm": 0.30055126547813416,
"learning_rate": 0.00019402798803537538,
"loss": 0.7453,
"step": 18300
},
{
"epoch": 4.6,
"grad_norm": 0.33305642008781433,
"learning_rate": 0.00019163865903602372,
"loss": 0.708,
"step": 18400
},
{
"epoch": 4.625,
"grad_norm": 0.32721662521362305,
"learning_rate": 0.00018925495502418406,
"loss": 0.7182,
"step": 18500
},
{
"epoch": 4.65,
"grad_norm": 0.31406721472740173,
"learning_rate": 0.00018687710574621051,
"loss": 0.7342,
"step": 18600
},
{
"epoch": 4.675,
"grad_norm": 0.3257578909397125,
"learning_rate": 0.00018450534038416566,
"loss": 0.6923,
"step": 18700
},
{
"epoch": 4.7,
"grad_norm": 0.3608546555042267,
"learning_rate": 0.00018213988753373146,
"loss": 0.7099,
"step": 18800
},
{
"epoch": 4.725,
"grad_norm": 0.34955260157585144,
"learning_rate": 0.00017978097518217702,
"loss": 0.7386,
"step": 18900
},
{
"epoch": 4.75,
"grad_norm": 0.3003368675708771,
"learning_rate": 0.00017742883068638446,
"loss": 0.7253,
"step": 19000
},
{
"epoch": 4.775,
"grad_norm": 0.3441944718360901,
"learning_rate": 0.00017508368075093582,
"loss": 0.689,
"step": 19100
},
{
"epoch": 4.8,
"grad_norm": 0.3106822669506073,
"learning_rate": 0.00017274575140626317,
"loss": 0.7209,
"step": 19200
},
{
"epoch": 4.825,
"grad_norm": 0.3345147669315338,
"learning_rate": 0.0001704152679868626,
"loss": 0.7361,
"step": 19300
},
{
"epoch": 4.85,
"grad_norm": 0.3377486765384674,
"learning_rate": 0.00016809245510957666,
"loss": 0.7154,
"step": 19400
},
{
"epoch": 4.875,
"grad_norm": 0.2919654846191406,
"learning_rate": 0.000165777536651945,
"loss": 0.717,
"step": 19500
},
{
"epoch": 4.9,
"grad_norm": 0.32570144534111023,
"learning_rate": 0.0001634707357306267,
"loss": 0.718,
"step": 19600
},
{
"epoch": 4.925,
"grad_norm": 0.3031199872493744,
"learning_rate": 0.00016117227467989602,
"loss": 0.7127,
"step": 19700
},
{
"epoch": 4.95,
"grad_norm": 0.3258810043334961,
"learning_rate": 0.0001588823750302126,
"loss": 0.71,
"step": 19800
},
{
"epoch": 4.975,
"grad_norm": 0.3082842230796814,
"learning_rate": 0.00015660125748687094,
"loss": 0.7096,
"step": 19900
},
{
"epoch": 5.0,
"grad_norm": 0.3242599070072174,
"learning_rate": 0.00015432914190872756,
"loss": 0.7043,
"step": 20000
},
{
"epoch": 5.0,
"eval_loss": 0.7582733035087585,
"eval_runtime": 157.8194,
"eval_samples_per_second": 25.345,
"eval_steps_per_second": 6.336,
"step": 20000
},
{
"epoch": 5.025,
"grad_norm": 0.33593127131462097,
"learning_rate": 0.000152066247287011,
"loss": 0.6958,
"step": 20100
},
{
"epoch": 5.05,
"grad_norm": 0.3056102991104126,
"learning_rate": 0.00014981279172421482,
"loss": 0.7012,
"step": 20200
},
{
"epoch": 5.075,
"grad_norm": 0.34994184970855713,
"learning_rate": 0.00014756899241307614,
"loss": 0.681,
"step": 20300
},
{
"epoch": 5.1,
"grad_norm": 0.3363034427165985,
"learning_rate": 0.00014533506561564306,
"loss": 0.6864,
"step": 20400
},
{
"epoch": 5.125,
"grad_norm": 0.33991631865501404,
"learning_rate": 0.00014311122664242953,
"loss": 0.6837,
"step": 20500
},
{
"epoch": 5.15,
"grad_norm": 0.3321220576763153,
"learning_rate": 0.00014089768983166444,
"loss": 0.6938,
"step": 20600
},
{
"epoch": 5.175,
"grad_norm": 0.33707287907600403,
"learning_rate": 0.000138694668528633,
"loss": 0.6866,
"step": 20700
},
{
"epoch": 5.2,
"grad_norm": 0.36306506395339966,
"learning_rate": 0.00013650237506511331,
"loss": 0.7078,
"step": 20800
},
{
"epoch": 5.225,
"grad_norm": 0.3246331512928009,
"learning_rate": 0.0001343210207389125,
"loss": 0.7157,
"step": 20900
},
{
"epoch": 5.25,
"grad_norm": 0.2866950035095215,
"learning_rate": 0.00013215081579350058,
"loss": 0.7065,
"step": 21000
},
{
"epoch": 5.275,
"grad_norm": 0.2862119972705841,
"learning_rate": 0.00012999196939774722,
"loss": 0.6948,
"step": 21100
},
{
"epoch": 5.3,
"grad_norm": 0.3230210244655609,
"learning_rate": 0.00012784468962576134,
"loss": 0.7118,
"step": 21200
},
{
"epoch": 5.325,
"grad_norm": 0.3498855233192444,
"learning_rate": 0.00012570918343683636,
"loss": 0.7203,
"step": 21300
},
{
"epoch": 5.35,
"grad_norm": 0.34220677614212036,
"learning_rate": 0.0001235856566555039,
"loss": 0.7034,
"step": 21400
},
{
"epoch": 5.375,
"grad_norm": 0.3399185538291931,
"learning_rate": 0.0001214743139516946,
"loss": 0.6889,
"step": 21500
},
{
"epoch": 5.4,
"grad_norm": 0.3611920475959778,
"learning_rate": 0.00011937535882101281,
"loss": 0.7056,
"step": 21600
},
{
"epoch": 5.425,
"grad_norm": 0.3285171091556549,
"learning_rate": 0.00011728899356512265,
"loss": 0.6799,
"step": 21700
},
{
"epoch": 5.45,
"grad_norm": 0.385384738445282,
"learning_rate": 0.00011521541927224994,
"loss": 0.7142,
"step": 21800
},
{
"epoch": 5.475,
"grad_norm": 0.3012756407260895,
"learning_rate": 0.00011315483579780094,
"loss": 0.6988,
"step": 21900
},
{
"epoch": 5.5,
"grad_norm": 0.3784416615962982,
"learning_rate": 0.00011110744174509952,
"loss": 0.6883,
"step": 22000
},
{
"epoch": 5.525,
"grad_norm": 0.3384094536304474,
"learning_rate": 0.00010907343444624579,
"loss": 0.7226,
"step": 22100
},
{
"epoch": 5.55,
"grad_norm": 0.37700313329696655,
"learning_rate": 0.00010705300994309697,
"loss": 0.6975,
"step": 22200
},
{
"epoch": 5.575,
"grad_norm": 0.3291476368904114,
"learning_rate": 0.00010504636296837161,
"loss": 0.7041,
"step": 22300
},
{
"epoch": 5.6,
"grad_norm": 0.3541957139968872,
"learning_rate": 0.00010305368692688174,
"loss": 0.6819,
"step": 22400
},
{
"epoch": 5.625,
"grad_norm": 0.31432709097862244,
"learning_rate": 0.00010107517387689166,
"loss": 0.7128,
"step": 22500
},
{
"epoch": 5.65,
"grad_norm": 0.34442463517189026,
"learning_rate": 9.911101451160715e-05,
"loss": 0.7226,
"step": 22600
},
{
"epoch": 5.675,
"grad_norm": 0.3558264672756195,
"learning_rate": 9.716139814079594e-05,
"loss": 0.7118,
"step": 22700
},
{
"epoch": 5.7,
"grad_norm": 0.2919544577598572,
"learning_rate": 9.522651267254148e-05,
"loss": 0.7193,
"step": 22800
},
{
"epoch": 5.725,
"grad_norm": 0.3444010019302368,
"learning_rate": 9.330654459513265e-05,
"loss": 0.6942,
"step": 22900
},
{
"epoch": 5.75,
"grad_norm": 0.27795663475990295,
"learning_rate": 9.140167895908866e-05,
"loss": 0.7159,
"step": 23000
},
{
"epoch": 5.775,
"grad_norm": 0.35614287853240967,
"learning_rate": 8.951209935932425e-05,
"loss": 0.6779,
"step": 23100
},
{
"epoch": 5.8,
"grad_norm": 0.3566213548183441,
"learning_rate": 8.763798791745412e-05,
"loss": 0.7242,
"step": 23200
},
{
"epoch": 5.825,
"grad_norm": 0.3220725953578949,
"learning_rate": 8.577952526423969e-05,
"loss": 0.6914,
"step": 23300
},
{
"epoch": 5.85,
"grad_norm": 0.33407965302467346,
"learning_rate": 8.393689052217964e-05,
"loss": 0.7149,
"step": 23400
},
{
"epoch": 5.875,
"grad_norm": 0.3270919919013977,
"learning_rate": 8.211026128824539e-05,
"loss": 0.7023,
"step": 23500
},
{
"epoch": 5.9,
"grad_norm": 0.3497578799724579,
"learning_rate": 8.029981361676455e-05,
"loss": 0.7051,
"step": 23600
},
{
"epoch": 5.925,
"grad_norm": 0.31495147943496704,
"learning_rate": 7.850572200245185e-05,
"loss": 0.7158,
"step": 23700
},
{
"epoch": 5.95,
"grad_norm": 0.34137919545173645,
"learning_rate": 7.672815936359106e-05,
"loss": 0.7264,
"step": 23800
},
{
"epoch": 5.975,
"grad_norm": 0.37350091338157654,
"learning_rate": 7.496729702536912e-05,
"loss": 0.6848,
"step": 23900
},
{
"epoch": 6.0,
"grad_norm": 0.3614259362220764,
"learning_rate": 7.322330470336314e-05,
"loss": 0.6809,
"step": 24000
},
{
"epoch": 6.0,
"eval_loss": 0.7561643123626709,
"eval_runtime": 157.6906,
"eval_samples_per_second": 25.366,
"eval_steps_per_second": 6.342,
"step": 24000
},
{
"epoch": 6.025,
"grad_norm": 0.3754175901412964,
"learning_rate": 7.149635048718294e-05,
"loss": 0.6724,
"step": 24100
},
{
"epoch": 6.05,
"grad_norm": 0.3396029472351074,
"learning_rate": 6.97866008242703e-05,
"loss": 0.6737,
"step": 24200
},
{
"epoch": 6.075,
"grad_norm": 0.3344869315624237,
"learning_rate": 6.809422050385628e-05,
"loss": 0.6948,
"step": 24300
},
{
"epoch": 6.1,
"grad_norm": 0.34992533922195435,
"learning_rate": 6.641937264107867e-05,
"loss": 0.6685,
"step": 24400
},
{
"epoch": 6.125,
"grad_norm": 0.35134392976760864,
"learning_rate": 6.476221866126028e-05,
"loss": 0.6895,
"step": 24500
},
{
"epoch": 6.15,
"grad_norm": 0.3663840591907501,
"learning_rate": 6.312291828435076e-05,
"loss": 0.7168,
"step": 24600
},
{
"epoch": 6.175,
"grad_norm": 0.38176533579826355,
"learning_rate": 6.150162950953264e-05,
"loss": 0.7008,
"step": 24700
},
{
"epoch": 6.2,
"grad_norm": 0.36779487133026123,
"learning_rate": 5.989850859999227e-05,
"loss": 0.714,
"step": 24800
},
{
"epoch": 6.225,
"grad_norm": 0.3299673795700073,
"learning_rate": 5.831371006785963e-05,
"loss": 0.6937,
"step": 24900
},
{
"epoch": 6.25,
"grad_norm": 0.40928682684898376,
"learning_rate": 5.6747386659315755e-05,
"loss": 0.6806,
"step": 25000
},
{
"epoch": 6.275,
"grad_norm": 0.3500606119632721,
"learning_rate": 5.519968933987082e-05,
"loss": 0.698,
"step": 25100
},
{
"epoch": 6.3,
"grad_norm": 0.3423629403114319,
"learning_rate": 5.367076727981382e-05,
"loss": 0.6839,
"step": 25200
},
{
"epoch": 6.325,
"grad_norm": 0.34447258710861206,
"learning_rate": 5.216076783983492e-05,
"loss": 0.7238,
"step": 25300
},
{
"epoch": 6.35,
"grad_norm": 0.35269954800605774,
"learning_rate": 5.066983655682325e-05,
"loss": 0.7214,
"step": 25400
},
{
"epoch": 6.375,
"grad_norm": 0.380842387676239,
"learning_rate": 4.919811712983879e-05,
"loss": 0.6966,
"step": 25500
},
{
"epoch": 6.4,
"grad_norm": 0.3271864652633667,
"learning_rate": 4.7745751406263163e-05,
"loss": 0.6867,
"step": 25600
},
{
"epoch": 6.425,
"grad_norm": 0.308631032705307,
"learning_rate": 4.6312879368127645e-05,
"loss": 0.7056,
"step": 25700
},
{
"epoch": 6.45,
"grad_norm": 0.3455272912979126,
"learning_rate": 4.4899639118621604e-05,
"loss": 0.6774,
"step": 25800
},
{
"epoch": 6.475,
"grad_norm": 0.3664863407611847,
"learning_rate": 4.350616686878175e-05,
"loss": 0.6826,
"step": 25900
},
{
"epoch": 6.5,
"grad_norm": 0.3774823248386383,
"learning_rate": 4.213259692436367e-05,
"loss": 0.69,
"step": 26000
},
{
"epoch": 6.525,
"grad_norm": 0.35024821758270264,
"learning_rate": 4.077906167289766e-05,
"loss": 0.7111,
"step": 26100
},
{
"epoch": 6.55,
"grad_norm": 0.30343690514564514,
"learning_rate": 3.944569157092839e-05,
"loss": 0.687,
"step": 26200
},
{
"epoch": 6.575,
"grad_norm": 0.30187878012657166,
"learning_rate": 3.8132615131441396e-05,
"loss": 0.7084,
"step": 26300
},
{
"epoch": 6.6,
"grad_norm": 0.3257708251476288,
"learning_rate": 3.6839958911476953e-05,
"loss": 0.6622,
"step": 26400
},
{
"epoch": 6.625,
"grad_norm": 0.328375905752182,
"learning_rate": 3.5567847499932e-05,
"loss": 0.7004,
"step": 26500
},
{
"epoch": 6.65,
"grad_norm": 0.3516775667667389,
"learning_rate": 3.431640350555204e-05,
"loss": 0.7012,
"step": 26600
},
{
"epoch": 6.675,
"grad_norm": 0.2901289463043213,
"learning_rate": 3.308574754511404e-05,
"loss": 0.6827,
"step": 26700
},
{
"epoch": 6.7,
"grad_norm": 0.3217683732509613,
"learning_rate": 3.187599823180071e-05,
"loss": 0.6935,
"step": 26800
},
{
"epoch": 6.725,
"grad_norm": 0.360307902097702,
"learning_rate": 3.0687272163768986e-05,
"loss": 0.6861,
"step": 26900
},
{
"epoch": 6.75,
"grad_norm": 0.35294219851493835,
"learning_rate": 2.9519683912911265e-05,
"loss": 0.686,
"step": 27000
},
{
"epoch": 6.775,
"grad_norm": 0.3497380316257477,
"learning_rate": 2.8373346013813417e-05,
"loss": 0.663,
"step": 27100
},
{
"epoch": 6.8,
"grad_norm": 0.36016374826431274,
"learning_rate": 2.7248368952908055e-05,
"loss": 0.6904,
"step": 27200
},
{
"epoch": 6.825,
"grad_norm": 0.37491482496261597,
"learning_rate": 2.6144861157825773e-05,
"loss": 0.6998,
"step": 27300
},
{
"epoch": 6.85,
"grad_norm": 0.35341572761535645,
"learning_rate": 2.5062928986944677e-05,
"loss": 0.6909,
"step": 27400
},
{
"epoch": 6.875,
"grad_norm": 0.34248894453048706,
"learning_rate": 2.4002676719139166e-05,
"loss": 0.699,
"step": 27500
},
{
"epoch": 6.9,
"grad_norm": 0.3178793787956238,
"learning_rate": 2.296420654372966e-05,
"loss": 0.6879,
"step": 27600
},
{
"epoch": 6.925,
"grad_norm": 0.3343910574913025,
"learning_rate": 2.1947618550633096e-05,
"loss": 0.6952,
"step": 27700
},
{
"epoch": 6.95,
"grad_norm": 0.39180707931518555,
"learning_rate": 2.0953010720716037e-05,
"loss": 0.7093,
"step": 27800
},
{
"epoch": 6.975,
"grad_norm": 0.3549463152885437,
"learning_rate": 1.9980478916351297e-05,
"loss": 0.7203,
"step": 27900
},
{
"epoch": 7.0,
"grad_norm": 0.33500146865844727,
"learning_rate": 1.9030116872178316e-05,
"loss": 0.6795,
"step": 28000
},
{
"epoch": 7.0,
"eval_loss": 0.7552515864372253,
"eval_runtime": 157.5678,
"eval_samples_per_second": 25.386,
"eval_steps_per_second": 6.346,
"step": 28000
},
{
"epoch": 7.025,
"grad_norm": 0.3765384256839752,
"learning_rate": 1.8102016186068992e-05,
"loss": 0.6917,
"step": 28100
},
{
"epoch": 7.05,
"grad_norm": 0.32540133595466614,
"learning_rate": 1.719626631029911e-05,
"loss": 0.6983,
"step": 28200
},
{
"epoch": 7.075,
"grad_norm": 0.3466598391532898,
"learning_rate": 1.6312954542926888e-05,
"loss": 0.6723,
"step": 28300
},
{
"epoch": 7.1,
"grad_norm": 0.35120540857315063,
"learning_rate": 1.5452166019378987e-05,
"loss": 0.6802,
"step": 28400
},
{
"epoch": 7.125,
"grad_norm": 0.3241645097732544,
"learning_rate": 1.4613983704244827e-05,
"loss": 0.6722,
"step": 28500
},
{
"epoch": 7.15,
"grad_norm": 0.29869577288627625,
"learning_rate": 1.3798488383280488e-05,
"loss": 0.698,
"step": 28600
},
{
"epoch": 7.175,
"grad_norm": 0.3423502743244171,
"learning_rate": 1.3005758655622424e-05,
"loss": 0.6757,
"step": 28700
},
{
"epoch": 7.2,
"grad_norm": 0.31562918424606323,
"learning_rate": 1.2235870926211617e-05,
"loss": 0.7011,
"step": 28800
},
{
"epoch": 7.225,
"grad_norm": 0.3869987726211548,
"learning_rate": 1.1488899398429897e-05,
"loss": 0.6994,
"step": 28900
},
{
"epoch": 7.25,
"grad_norm": 0.34807926416397095,
"learning_rate": 1.0764916066947795e-05,
"loss": 0.6644,
"step": 29000
},
{
"epoch": 7.275,
"grad_norm": 0.32184460759162903,
"learning_rate": 1.0063990710785648e-05,
"loss": 0.6944,
"step": 29100
},
{
"epoch": 7.3,
"grad_norm": 0.33581066131591797,
"learning_rate": 9.386190886588208e-06,
"loss": 0.6889,
"step": 29200
},
{
"epoch": 7.325,
"grad_norm": 0.30903181433677673,
"learning_rate": 8.731581922113152e-06,
"loss": 0.7131,
"step": 29300
},
{
"epoch": 7.35,
"grad_norm": 0.3609547019004822,
"learning_rate": 8.10022690993506e-06,
"loss": 0.6729,
"step": 29400
},
{
"epoch": 7.375,
"grad_norm": 0.35342085361480713,
"learning_rate": 7.4921867013640064e-06,
"loss": 0.6824,
"step": 29500
},
{
"epoch": 7.4,
"grad_norm": 0.30028045177459717,
"learning_rate": 6.907519900580861e-06,
"loss": 0.7038,
"step": 29600
},
{
"epoch": 7.425,
"grad_norm": 0.33283013105392456,
"learning_rate": 6.34628285898875e-06,
"loss": 0.6993,
"step": 29700
},
{
"epoch": 7.45,
"grad_norm": 0.3742896318435669,
"learning_rate": 5.808529669781903e-06,
"loss": 0.6835,
"step": 29800
},
{
"epoch": 7.475,
"grad_norm": 0.4153238534927368,
"learning_rate": 5.294312162731935e-06,
"loss": 0.6899,
"step": 29900
},
{
"epoch": 7.5,
"grad_norm": 0.3647385835647583,
"learning_rate": 4.803679899192393e-06,
"loss": 0.6809,
"step": 30000
},
{
"epoch": 7.525,
"grad_norm": 0.4097493886947632,
"learning_rate": 4.336680167322055e-06,
"loss": 0.6943,
"step": 30100
},
{
"epoch": 7.55,
"grad_norm": 0.3686430752277374,
"learning_rate": 3.893357977527101e-06,
"loss": 0.6982,
"step": 30200
},
{
"epoch": 7.575,
"grad_norm": 0.3367227613925934,
"learning_rate": 3.4737560581228343e-06,
"loss": 0.6926,
"step": 30300
},
{
"epoch": 7.6,
"grad_norm": 0.33962807059288025,
"learning_rate": 3.077914851215585e-06,
"loss": 0.6978,
"step": 30400
},
{
"epoch": 7.625,
"grad_norm": 0.3672046661376953,
"learning_rate": 2.7058725088047465e-06,
"loss": 0.6876,
"step": 30500
},
{
"epoch": 7.65,
"grad_norm": 0.38116347789764404,
"learning_rate": 2.357664889105687e-06,
"loss": 0.7266,
"step": 30600
},
{
"epoch": 7.675,
"grad_norm": 0.3807855248451233,
"learning_rate": 2.0333255530934903e-06,
"loss": 0.6838,
"step": 30700
},
{
"epoch": 7.7,
"grad_norm": 0.3613605499267578,
"learning_rate": 1.7328857612684267e-06,
"loss": 0.6628,
"step": 30800
},
{
"epoch": 7.725,
"grad_norm": 0.3573913276195526,
"learning_rate": 1.4563744706429517e-06,
"loss": 0.6657,
"step": 30900
},
{
"epoch": 7.75,
"grad_norm": 0.378730833530426,
"learning_rate": 1.2038183319507957e-06,
"loss": 0.6951,
"step": 31000
},
{
"epoch": 7.775,
"grad_norm": 0.35392504930496216,
"learning_rate": 9.752416870782156e-07,
"loss": 0.7021,
"step": 31100
},
{
"epoch": 7.8,
"grad_norm": 0.37442269921302795,
"learning_rate": 7.70666566718009e-07,
"loss": 0.6924,
"step": 31200
},
{
"epoch": 7.825,
"grad_norm": 0.37346911430358887,
"learning_rate": 5.90112688246075e-07,
"loss": 0.6956,
"step": 31300
},
{
"epoch": 7.85,
"grad_norm": 0.3577325940132141,
"learning_rate": 4.335974538210441e-07,
"loss": 0.6897,
"step": 31400
},
{
"epoch": 7.875,
"grad_norm": 0.38326212763786316,
"learning_rate": 3.0113594870689873e-07,
"loss": 0.6949,
"step": 31500
},
{
"epoch": 7.9,
"grad_norm": 0.39763376116752625,
"learning_rate": 1.9274093981927476e-07,
"loss": 0.7033,
"step": 31600
},
{
"epoch": 7.925,
"grad_norm": 0.3353131413459778,
"learning_rate": 1.0842287449469579e-07,
"loss": 0.6684,
"step": 31700
},
{
"epoch": 7.95,
"grad_norm": 0.2801348865032196,
"learning_rate": 4.818987948379538e-08,
"loss": 0.6459,
"step": 31800
},
{
"epoch": 7.975,
"grad_norm": 0.3617517352104187,
"learning_rate": 1.2047760167999133e-08,
"loss": 0.701,
"step": 31900
},
{
"epoch": 8.0,
"grad_norm": 0.3444000482559204,
"learning_rate": 0.0,
"loss": 0.6832,
"step": 32000
}
],
"logging_steps": 100,
"max_steps": 32000,
"num_input_tokens_seen": 0,
"num_train_epochs": 8,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.243638398976e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}