{
"best_metric": 4.474733829498291,
"best_model_checkpoint": "bert_base_lda_100_v1_book/checkpoint-580000",
"epoch": 25.0,
"eval_steps": 10000,
"global_step": 593075,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.021076592336551025,
"grad_norm": 2.261220932006836,
"learning_rate": 5e-06,
"loss": 13.4773,
"step": 500
},
{
"epoch": 0.04215318467310205,
"grad_norm": 1.0920963287353516,
"learning_rate": 1e-05,
"loss": 11.2506,
"step": 1000
},
{
"epoch": 0.06322977700965308,
"grad_norm": 1.2633095979690552,
"learning_rate": 1.5e-05,
"loss": 10.6114,
"step": 1500
},
{
"epoch": 0.0843063693462041,
"grad_norm": 1.0412511825561523,
"learning_rate": 2e-05,
"loss": 10.4938,
"step": 2000
},
{
"epoch": 0.10538296168275513,
"grad_norm": 0.9251999855041504,
"learning_rate": 2.5e-05,
"loss": 10.4369,
"step": 2500
},
{
"epoch": 0.12645955401930617,
"grad_norm": 0.9076374769210815,
"learning_rate": 3e-05,
"loss": 10.3988,
"step": 3000
},
{
"epoch": 0.14753614635585718,
"grad_norm": 0.9712299704551697,
"learning_rate": 3.5e-05,
"loss": 10.3707,
"step": 3500
},
{
"epoch": 0.1686127386924082,
"grad_norm": 0.8643743991851807,
"learning_rate": 4e-05,
"loss": 10.3445,
"step": 4000
},
{
"epoch": 0.18968933102895924,
"grad_norm": 0.9184646010398865,
"learning_rate": 4.5e-05,
"loss": 10.3209,
"step": 4500
},
{
"epoch": 0.21076592336551025,
"grad_norm": 0.915134847164154,
"learning_rate": 5e-05,
"loss": 10.3054,
"step": 5000
},
{
"epoch": 0.2318425157020613,
"grad_norm": 0.8275418877601624,
"learning_rate": 5.500000000000001e-05,
"loss": 10.2941,
"step": 5500
},
{
"epoch": 0.25291910803861234,
"grad_norm": 0.8688166737556458,
"learning_rate": 6e-05,
"loss": 10.2762,
"step": 6000
},
{
"epoch": 0.27399570037516335,
"grad_norm": 0.775007963180542,
"learning_rate": 6.500000000000001e-05,
"loss": 10.2705,
"step": 6500
},
{
"epoch": 0.29507229271171437,
"grad_norm": 0.8859162330627441,
"learning_rate": 7e-05,
"loss": 10.2568,
"step": 7000
},
{
"epoch": 0.3161488850482654,
"grad_norm": 0.8755394220352173,
"learning_rate": 7.500000000000001e-05,
"loss": 10.2457,
"step": 7500
},
{
"epoch": 0.3372254773848164,
"grad_norm": 0.8218112587928772,
"learning_rate": 8e-05,
"loss": 10.2307,
"step": 8000
},
{
"epoch": 0.35830206972136747,
"grad_norm": 0.8845078945159912,
"learning_rate": 8.5e-05,
"loss": 10.1721,
"step": 8500
},
{
"epoch": 0.3793786620579185,
"grad_norm": 0.9402378797531128,
"learning_rate": 9e-05,
"loss": 9.9612,
"step": 9000
},
{
"epoch": 0.4004552543944695,
"grad_norm": 0.9976105093955994,
"learning_rate": 9.5e-05,
"loss": 9.7545,
"step": 9500
},
{
"epoch": 0.4215318467310205,
"grad_norm": 1.036570429801941,
"learning_rate": 0.0001,
"loss": 9.5861,
"step": 10000
},
{
"epoch": 0.4215318467310205,
"eval_accuracy": 0.16362183990094958,
"eval_loss": 9.359298706054688,
"eval_runtime": 387.4765,
"eval_samples_per_second": 311.518,
"eval_steps_per_second": 3.247,
"step": 10000
},
{
"epoch": 0.4426084390675716,
"grad_norm": 1.0330069065093994,
"learning_rate": 9.99142477382841e-05,
"loss": 9.4984,
"step": 10500
},
{
"epoch": 0.4636850314041226,
"grad_norm": 1.0583008527755737,
"learning_rate": 9.98284954765682e-05,
"loss": 9.4541,
"step": 11000
},
{
"epoch": 0.4847616237406736,
"grad_norm": 1.0603842735290527,
"learning_rate": 9.97427432148523e-05,
"loss": 9.4264,
"step": 11500
},
{
"epoch": 0.5058382160772247,
"grad_norm": 1.0387685298919678,
"learning_rate": 9.96569909531364e-05,
"loss": 9.3953,
"step": 12000
},
{
"epoch": 0.5269148084137757,
"grad_norm": 1.0398612022399902,
"learning_rate": 9.957123869142048e-05,
"loss": 9.3766,
"step": 12500
},
{
"epoch": 0.5479914007503267,
"grad_norm": 1.0386425256729126,
"learning_rate": 9.94854864297046e-05,
"loss": 9.3597,
"step": 13000
},
{
"epoch": 0.5690679930868777,
"grad_norm": 0.9679497480392456,
"learning_rate": 9.939973416798869e-05,
"loss": 9.3428,
"step": 13500
},
{
"epoch": 0.5901445854234287,
"grad_norm": 1.0438283681869507,
"learning_rate": 9.931398190627278e-05,
"loss": 9.3304,
"step": 14000
},
{
"epoch": 0.6112211777599798,
"grad_norm": 0.9864612221717834,
"learning_rate": 9.922822964455688e-05,
"loss": 9.3147,
"step": 14500
},
{
"epoch": 0.6322977700965308,
"grad_norm": 0.9195573329925537,
"learning_rate": 9.914247738284099e-05,
"loss": 9.3069,
"step": 15000
},
{
"epoch": 0.6533743624330818,
"grad_norm": 0.951598584651947,
"learning_rate": 9.905672512112507e-05,
"loss": 9.2937,
"step": 15500
},
{
"epoch": 0.6744509547696328,
"grad_norm": 0.9749416708946228,
"learning_rate": 9.897097285940917e-05,
"loss": 9.2869,
"step": 16000
},
{
"epoch": 0.6955275471061839,
"grad_norm": 1.0750514268875122,
"learning_rate": 9.888522059769327e-05,
"loss": 9.276,
"step": 16500
},
{
"epoch": 0.7166041394427349,
"grad_norm": 0.9207009673118591,
"learning_rate": 9.879946833597737e-05,
"loss": 9.2664,
"step": 17000
},
{
"epoch": 0.737680731779286,
"grad_norm": 0.9542419910430908,
"learning_rate": 9.871371607426147e-05,
"loss": 9.2559,
"step": 17500
},
{
"epoch": 0.758757324115837,
"grad_norm": 1.0007344484329224,
"learning_rate": 9.862796381254556e-05,
"loss": 9.2586,
"step": 18000
},
{
"epoch": 0.779833916452388,
"grad_norm": 0.9021992087364197,
"learning_rate": 9.854221155082965e-05,
"loss": 9.2475,
"step": 18500
},
{
"epoch": 0.800910508788939,
"grad_norm": 0.9669854640960693,
"learning_rate": 9.845645928911376e-05,
"loss": 9.246,
"step": 19000
},
{
"epoch": 0.82198710112549,
"grad_norm": 0.8515693545341492,
"learning_rate": 9.837070702739786e-05,
"loss": 9.2397,
"step": 19500
},
{
"epoch": 0.843063693462041,
"grad_norm": 0.8911448121070862,
"learning_rate": 9.828495476568194e-05,
"loss": 9.2311,
"step": 20000
},
{
"epoch": 0.843063693462041,
"eval_accuracy": 0.1641480244519192,
"eval_loss": 9.070075988769531,
"eval_runtime": 398.6781,
"eval_samples_per_second": 302.766,
"eval_steps_per_second": 3.155,
"step": 20000
},
{
"epoch": 0.864140285798592,
"grad_norm": 0.8714049458503723,
"learning_rate": 9.819920250396604e-05,
"loss": 9.223,
"step": 20500
},
{
"epoch": 0.8852168781351432,
"grad_norm": 0.9097802042961121,
"learning_rate": 9.811345024225015e-05,
"loss": 9.2214,
"step": 21000
},
{
"epoch": 0.9062934704716942,
"grad_norm": 0.9716510772705078,
"learning_rate": 9.802769798053424e-05,
"loss": 9.2148,
"step": 21500
},
{
"epoch": 0.9273700628082452,
"grad_norm": 0.878089964389801,
"learning_rate": 9.794194571881834e-05,
"loss": 9.2062,
"step": 22000
},
{
"epoch": 0.9484466551447962,
"grad_norm": 0.8791041374206543,
"learning_rate": 9.785619345710244e-05,
"loss": 9.2022,
"step": 22500
},
{
"epoch": 0.9695232474813472,
"grad_norm": 0.8767024874687195,
"learning_rate": 9.777044119538653e-05,
"loss": 9.1946,
"step": 23000
},
{
"epoch": 0.9905998398178982,
"grad_norm": 1.0245541334152222,
"learning_rate": 9.768468893367063e-05,
"loss": 9.1902,
"step": 23500
},
{
"epoch": 1.0116764321544494,
"grad_norm": 0.9279555082321167,
"learning_rate": 9.759893667195473e-05,
"loss": 9.1846,
"step": 24000
},
{
"epoch": 1.0327530244910004,
"grad_norm": 0.9039582014083862,
"learning_rate": 9.751318441023883e-05,
"loss": 9.1787,
"step": 24500
},
{
"epoch": 1.0538296168275514,
"grad_norm": 0.8920139670372009,
"learning_rate": 9.742743214852293e-05,
"loss": 9.1769,
"step": 25000
},
{
"epoch": 1.0749062091641024,
"grad_norm": 0.9082123637199402,
"learning_rate": 9.734167988680703e-05,
"loss": 9.1661,
"step": 25500
},
{
"epoch": 1.0959828015006534,
"grad_norm": 0.883570671081543,
"learning_rate": 9.725592762509111e-05,
"loss": 9.1627,
"step": 26000
},
{
"epoch": 1.1170593938372044,
"grad_norm": 0.8912183046340942,
"learning_rate": 9.717017536337522e-05,
"loss": 9.1573,
"step": 26500
},
{
"epoch": 1.1381359861737554,
"grad_norm": 0.8594160079956055,
"learning_rate": 9.708442310165932e-05,
"loss": 9.1611,
"step": 27000
},
{
"epoch": 1.1592125785103065,
"grad_norm": 0.8732885718345642,
"learning_rate": 9.69986708399434e-05,
"loss": 9.1521,
"step": 27500
},
{
"epoch": 1.1802891708468575,
"grad_norm": 0.8107864260673523,
"learning_rate": 9.69129185782275e-05,
"loss": 9.1535,
"step": 28000
},
{
"epoch": 1.2013657631834085,
"grad_norm": 0.8748419284820557,
"learning_rate": 9.68271663165116e-05,
"loss": 9.1421,
"step": 28500
},
{
"epoch": 1.2224423555199595,
"grad_norm": 0.8904475569725037,
"learning_rate": 9.67414140547957e-05,
"loss": 9.1389,
"step": 29000
},
{
"epoch": 1.2435189478565105,
"grad_norm": 0.9183618426322937,
"learning_rate": 9.66556617930798e-05,
"loss": 9.1326,
"step": 29500
},
{
"epoch": 1.2645955401930615,
"grad_norm": 0.8394513130187988,
"learning_rate": 9.656990953136388e-05,
"loss": 9.1362,
"step": 30000
},
{
"epoch": 1.2645955401930615,
"eval_accuracy": 0.16621661524240725,
"eval_loss": 8.988872528076172,
"eval_runtime": 402.817,
"eval_samples_per_second": 299.655,
"eval_steps_per_second": 3.123,
"step": 30000
},
{
"epoch": 1.2856721325296125,
"grad_norm": 0.8479052186012268,
"learning_rate": 9.6484157269648e-05,
"loss": 9.1268,
"step": 30500
},
{
"epoch": 1.3067487248661636,
"grad_norm": 0.8836435079574585,
"learning_rate": 9.639840500793209e-05,
"loss": 9.1284,
"step": 31000
},
{
"epoch": 1.3278253172027146,
"grad_norm": 0.8392910361289978,
"learning_rate": 9.631265274621618e-05,
"loss": 9.1226,
"step": 31500
},
{
"epoch": 1.3489019095392658,
"grad_norm": 0.851066529750824,
"learning_rate": 9.622690048450028e-05,
"loss": 9.1247,
"step": 32000
},
{
"epoch": 1.3699785018758166,
"grad_norm": 0.8625577092170715,
"learning_rate": 9.614114822278439e-05,
"loss": 9.1173,
"step": 32500
},
{
"epoch": 1.3910550942123678,
"grad_norm": 0.8905825614929199,
"learning_rate": 9.605539596106847e-05,
"loss": 9.1176,
"step": 33000
},
{
"epoch": 1.4121316865489189,
"grad_norm": 0.8313146829605103,
"learning_rate": 9.596964369935257e-05,
"loss": 9.1133,
"step": 33500
},
{
"epoch": 1.4332082788854699,
"grad_norm": 0.8795130848884583,
"learning_rate": 9.588389143763667e-05,
"loss": 9.1069,
"step": 34000
},
{
"epoch": 1.4542848712220209,
"grad_norm": 0.8162212371826172,
"learning_rate": 9.579813917592077e-05,
"loss": 9.1067,
"step": 34500
},
{
"epoch": 1.475361463558572,
"grad_norm": 0.8535031676292419,
"learning_rate": 9.571238691420487e-05,
"loss": 9.1028,
"step": 35000
},
{
"epoch": 1.496438055895123,
"grad_norm": 1.0675309896469116,
"learning_rate": 9.562663465248896e-05,
"loss": 9.0519,
"step": 35500
},
{
"epoch": 1.517514648231674,
"grad_norm": 1.2214007377624512,
"learning_rate": 9.554088239077306e-05,
"loss": 8.4557,
"step": 36000
},
{
"epoch": 1.538591240568225,
"grad_norm": 1.318041205406189,
"learning_rate": 9.545513012905716e-05,
"loss": 8.0001,
"step": 36500
},
{
"epoch": 1.559667832904776,
"grad_norm": 1.312430739402771,
"learning_rate": 9.536937786734126e-05,
"loss": 7.8229,
"step": 37000
},
{
"epoch": 1.580744425241327,
"grad_norm": 1.3488483428955078,
"learning_rate": 9.528362560562534e-05,
"loss": 7.694,
"step": 37500
},
{
"epoch": 1.601821017577878,
"grad_norm": 1.3927927017211914,
"learning_rate": 9.519787334390946e-05,
"loss": 7.5924,
"step": 38000
},
{
"epoch": 1.622897609914429,
"grad_norm": 1.2621397972106934,
"learning_rate": 9.511212108219355e-05,
"loss": 7.4694,
"step": 38500
},
{
"epoch": 1.64397420225098,
"grad_norm": 1.214800238609314,
"learning_rate": 9.502636882047764e-05,
"loss": 7.2611,
"step": 39000
},
{
"epoch": 1.6650507945875312,
"grad_norm": 1.2118133306503296,
"learning_rate": 9.494061655876174e-05,
"loss": 7.1014,
"step": 39500
},
{
"epoch": 1.686127386924082,
"grad_norm": 1.247459888458252,
"learning_rate": 9.485486429704585e-05,
"loss": 6.984,
"step": 40000
},
{
"epoch": 1.686127386924082,
"eval_accuracy": 0.4683214904541625,
"eval_loss": 6.448629379272461,
"eval_runtime": 278.7749,
"eval_samples_per_second": 432.987,
"eval_steps_per_second": 4.513,
"step": 40000
},
{
"epoch": 1.7072039792606333,
"grad_norm": 1.329538106918335,
"learning_rate": 9.476911203532993e-05,
"loss": 6.8782,
"step": 40500
},
{
"epoch": 1.728280571597184,
"grad_norm": 1.244707703590393,
"learning_rate": 9.468335977361403e-05,
"loss": 6.7865,
"step": 41000
},
{
"epoch": 1.7493571639337353,
"grad_norm": 1.286187767982483,
"learning_rate": 9.459760751189813e-05,
"loss": 6.6955,
"step": 41500
},
{
"epoch": 1.770433756270286,
"grad_norm": 1.3048105239868164,
"learning_rate": 9.451185525018223e-05,
"loss": 6.6017,
"step": 42000
},
{
"epoch": 1.7915103486068373,
"grad_norm": 1.3026583194732666,
"learning_rate": 9.442610298846633e-05,
"loss": 6.5096,
"step": 42500
},
{
"epoch": 1.8125869409433881,
"grad_norm": 1.3210301399230957,
"learning_rate": 9.434035072675043e-05,
"loss": 6.4334,
"step": 43000
},
{
"epoch": 1.8336635332799394,
"grad_norm": 1.2599812746047974,
"learning_rate": 9.425459846503451e-05,
"loss": 6.3625,
"step": 43500
},
{
"epoch": 1.8547401256164904,
"grad_norm": 1.2761399745941162,
"learning_rate": 9.416884620331862e-05,
"loss": 6.3027,
"step": 44000
},
{
"epoch": 1.8758167179530414,
"grad_norm": 1.3949856758117676,
"learning_rate": 9.408309394160272e-05,
"loss": 6.2531,
"step": 44500
},
{
"epoch": 1.8968933102895924,
"grad_norm": 1.3100916147232056,
"learning_rate": 9.39973416798868e-05,
"loss": 6.1946,
"step": 45000
},
{
"epoch": 1.9179699026261434,
"grad_norm": 1.2545853853225708,
"learning_rate": 9.39115894181709e-05,
"loss": 6.1575,
"step": 45500
},
{
"epoch": 1.9390464949626944,
"grad_norm": 1.241576910018921,
"learning_rate": 9.382583715645502e-05,
"loss": 6.114,
"step": 46000
},
{
"epoch": 1.9601230872992454,
"grad_norm": 1.2163891792297363,
"learning_rate": 9.37400848947391e-05,
"loss": 6.0761,
"step": 46500
},
{
"epoch": 1.9811996796357965,
"grad_norm": 1.2884899377822876,
"learning_rate": 9.36543326330232e-05,
"loss": 6.0511,
"step": 47000
},
{
"epoch": 2.0022762719723475,
"grad_norm": 1.3094465732574463,
"learning_rate": 9.35685803713073e-05,
"loss": 6.0234,
"step": 47500
},
{
"epoch": 2.0233528643088987,
"grad_norm": 1.2269563674926758,
"learning_rate": 9.34828281095914e-05,
"loss": 5.9825,
"step": 48000
},
{
"epoch": 2.0444294566454495,
"grad_norm": 1.2191238403320312,
"learning_rate": 9.339707584787549e-05,
"loss": 5.9574,
"step": 48500
},
{
"epoch": 2.0655060489820007,
"grad_norm": 1.2610235214233398,
"learning_rate": 9.331132358615959e-05,
"loss": 5.9266,
"step": 49000
},
{
"epoch": 2.0865826413185515,
"grad_norm": 1.3451616764068604,
"learning_rate": 9.322557132444369e-05,
"loss": 5.9029,
"step": 49500
},
{
"epoch": 2.1076592336551028,
"grad_norm": 1.299438238143921,
"learning_rate": 9.313981906272779e-05,
"loss": 5.8878,
"step": 50000
},
{
"epoch": 2.1076592336551028,
"eval_accuracy": 0.5941274293510647,
"eval_loss": 5.543643951416016,
"eval_runtime": 280.2686,
"eval_samples_per_second": 430.68,
"eval_steps_per_second": 4.489,
"step": 50000
},
{
"epoch": 2.1287358259916536,
"grad_norm": 1.2258011102676392,
"learning_rate": 9.305406680101189e-05,
"loss": 5.869,
"step": 50500
},
{
"epoch": 2.149812418328205,
"grad_norm": 1.2397315502166748,
"learning_rate": 9.296831453929597e-05,
"loss": 5.845,
"step": 51000
},
{
"epoch": 2.1708890106647556,
"grad_norm": 1.271088719367981,
"learning_rate": 9.288256227758008e-05,
"loss": 5.8236,
"step": 51500
},
{
"epoch": 2.191965603001307,
"grad_norm": 1.2662914991378784,
"learning_rate": 9.279681001586418e-05,
"loss": 5.8079,
"step": 52000
},
{
"epoch": 2.2130421953378576,
"grad_norm": 1.2614364624023438,
"learning_rate": 9.271105775414827e-05,
"loss": 5.792,
"step": 52500
},
{
"epoch": 2.234118787674409,
"grad_norm": 1.2903742790222168,
"learning_rate": 9.262530549243236e-05,
"loss": 5.781,
"step": 53000
},
{
"epoch": 2.2551953800109596,
"grad_norm": 1.1807827949523926,
"learning_rate": 9.253955323071646e-05,
"loss": 5.7643,
"step": 53500
},
{
"epoch": 2.276271972347511,
"grad_norm": 1.2336211204528809,
"learning_rate": 9.245380096900056e-05,
"loss": 5.7511,
"step": 54000
},
{
"epoch": 2.2973485646840617,
"grad_norm": 1.2572416067123413,
"learning_rate": 9.236804870728466e-05,
"loss": 5.7289,
"step": 54500
},
{
"epoch": 2.318425157020613,
"grad_norm": 1.258495569229126,
"learning_rate": 9.228229644556876e-05,
"loss": 5.7228,
"step": 55000
},
{
"epoch": 2.339501749357164,
"grad_norm": 1.2932839393615723,
"learning_rate": 9.219654418385286e-05,
"loss": 5.7016,
"step": 55500
},
{
"epoch": 2.360578341693715,
"grad_norm": 1.2328627109527588,
"learning_rate": 9.211079192213695e-05,
"loss": 5.7003,
"step": 56000
},
{
"epoch": 2.381654934030266,
"grad_norm": 1.2239735126495361,
"learning_rate": 9.202503966042105e-05,
"loss": 5.6838,
"step": 56500
},
{
"epoch": 2.402731526366817,
"grad_norm": 1.248216986656189,
"learning_rate": 9.193928739870514e-05,
"loss": 5.6698,
"step": 57000
},
{
"epoch": 2.423808118703368,
"grad_norm": 1.235373854637146,
"learning_rate": 9.185353513698925e-05,
"loss": 5.6581,
"step": 57500
},
{
"epoch": 2.444884711039919,
"grad_norm": 1.2324343919754028,
"learning_rate": 9.176778287527335e-05,
"loss": 5.6488,
"step": 58000
},
{
"epoch": 2.4659613033764702,
"grad_norm": 1.2697035074234009,
"learning_rate": 9.168203061355743e-05,
"loss": 5.6396,
"step": 58500
},
{
"epoch": 2.487037895713021,
"grad_norm": 1.2375431060791016,
"learning_rate": 9.159627835184153e-05,
"loss": 5.6277,
"step": 59000
},
{
"epoch": 2.5081144880495723,
"grad_norm": 1.2430800199508667,
"learning_rate": 9.151052609012564e-05,
"loss": 5.6206,
"step": 59500
},
{
"epoch": 2.529191080386123,
"grad_norm": 1.2040998935699463,
"learning_rate": 9.142477382840973e-05,
"loss": 5.6092,
"step": 60000
},
{
"epoch": 2.529191080386123,
"eval_accuracy": 0.6335772190183138,
"eval_loss": 5.286244869232178,
"eval_runtime": 278.9208,
"eval_samples_per_second": 432.761,
"eval_steps_per_second": 4.51,
"step": 60000
},
{
"epoch": 2.5502676727226743,
"grad_norm": 1.1964696645736694,
"learning_rate": 9.133902156669383e-05,
"loss": 5.5974,
"step": 60500
},
{
"epoch": 2.571344265059225,
"grad_norm": 1.1946040391921997,
"learning_rate": 9.125326930497792e-05,
"loss": 5.591,
"step": 61000
},
{
"epoch": 2.5924208573957763,
"grad_norm": 1.208908200263977,
"learning_rate": 9.116751704326202e-05,
"loss": 5.5797,
"step": 61500
},
{
"epoch": 2.613497449732327,
"grad_norm": 1.1945561170578003,
"learning_rate": 9.108176478154612e-05,
"loss": 5.5689,
"step": 62000
},
{
"epoch": 2.6345740420688784,
"grad_norm": 1.2383745908737183,
"learning_rate": 9.09960125198302e-05,
"loss": 5.5708,
"step": 62500
},
{
"epoch": 2.655650634405429,
"grad_norm": 1.2635372877120972,
"learning_rate": 9.091026025811432e-05,
"loss": 5.5548,
"step": 63000
},
{
"epoch": 2.6767272267419804,
"grad_norm": 1.26026451587677,
"learning_rate": 9.082450799639842e-05,
"loss": 5.5479,
"step": 63500
},
{
"epoch": 2.6978038190785316,
"grad_norm": 1.2060080766677856,
"learning_rate": 9.07387557346825e-05,
"loss": 5.5418,
"step": 64000
},
{
"epoch": 2.7188804114150824,
"grad_norm": 1.2217984199523926,
"learning_rate": 9.06530034729666e-05,
"loss": 5.5318,
"step": 64500
},
{
"epoch": 2.739957003751633,
"grad_norm": 1.2290257215499878,
"learning_rate": 9.05672512112507e-05,
"loss": 5.526,
"step": 65000
},
{
"epoch": 2.7610335960881844,
"grad_norm": 1.2221649885177612,
"learning_rate": 9.04814989495348e-05,
"loss": 5.5188,
"step": 65500
},
{
"epoch": 2.7821101884247357,
"grad_norm": 1.2461947202682495,
"learning_rate": 9.03957466878189e-05,
"loss": 5.5149,
"step": 66000
},
{
"epoch": 2.8031867807612865,
"grad_norm": 1.209800362586975,
"learning_rate": 9.030999442610299e-05,
"loss": 5.5031,
"step": 66500
},
{
"epoch": 2.8242633730978377,
"grad_norm": 1.183297872543335,
"learning_rate": 9.022424216438709e-05,
"loss": 5.494,
"step": 67000
},
{
"epoch": 2.8453399654343885,
"grad_norm": 1.169618844985962,
"learning_rate": 9.013848990267119e-05,
"loss": 5.4878,
"step": 67500
},
{
"epoch": 2.8664165577709397,
"grad_norm": 1.232101559638977,
"learning_rate": 9.005273764095529e-05,
"loss": 5.4803,
"step": 68000
},
{
"epoch": 2.8874931501074905,
"grad_norm": 1.2106157541275024,
"learning_rate": 8.996698537923937e-05,
"loss": 5.4805,
"step": 68500
},
{
"epoch": 2.9085697424440418,
"grad_norm": 1.2430484294891357,
"learning_rate": 8.988123311752348e-05,
"loss": 5.4691,
"step": 69000
},
{
"epoch": 2.9296463347805926,
"grad_norm": 1.2093271017074585,
"learning_rate": 8.979548085580758e-05,
"loss": 5.4661,
"step": 69500
},
{
"epoch": 2.950722927117144,
"grad_norm": 1.164513111114502,
"learning_rate": 8.970972859409167e-05,
"loss": 5.4567,
"step": 70000
},
{
"epoch": 2.950722927117144,
"eval_accuracy": 0.6530826952326345,
"eval_loss": 5.155585765838623,
"eval_runtime": 281.3671,
"eval_samples_per_second": 428.998,
"eval_steps_per_second": 4.471,
"step": 70000
},
{
"epoch": 2.9717995194536946,
"grad_norm": 1.2059588432312012,
"learning_rate": 8.962397633237576e-05,
"loss": 5.455,
"step": 70500
},
{
"epoch": 2.992876111790246,
"grad_norm": 1.1862009763717651,
"learning_rate": 8.953822407065988e-05,
"loss": 5.4474,
"step": 71000
},
{
"epoch": 3.0139527041267966,
"grad_norm": 1.2490551471710205,
"learning_rate": 8.945247180894396e-05,
"loss": 5.4395,
"step": 71500
},
{
"epoch": 3.035029296463348,
"grad_norm": 1.179486870765686,
"learning_rate": 8.936671954722806e-05,
"loss": 5.4332,
"step": 72000
},
{
"epoch": 3.0561058887998986,
"grad_norm": 1.22465980052948,
"learning_rate": 8.928096728551216e-05,
"loss": 5.4299,
"step": 72500
},
{
"epoch": 3.07718248113645,
"grad_norm": 1.1607983112335205,
"learning_rate": 8.919521502379626e-05,
"loss": 5.421,
"step": 73000
},
{
"epoch": 3.0982590734730007,
"grad_norm": 1.2240633964538574,
"learning_rate": 8.910946276208035e-05,
"loss": 5.4218,
"step": 73500
},
{
"epoch": 3.119335665809552,
"grad_norm": 1.1680089235305786,
"learning_rate": 8.902371050036445e-05,
"loss": 5.407,
"step": 74000
},
{
"epoch": 3.140412258146103,
"grad_norm": 1.1453983783721924,
"learning_rate": 8.893795823864855e-05,
"loss": 5.4095,
"step": 74500
},
{
"epoch": 3.161488850482654,
"grad_norm": 1.245374083518982,
"learning_rate": 8.885220597693265e-05,
"loss": 5.404,
"step": 75000
},
{
"epoch": 3.182565442819205,
"grad_norm": 1.2105519771575928,
"learning_rate": 8.876645371521675e-05,
"loss": 5.4001,
"step": 75500
},
{
"epoch": 3.203642035155756,
"grad_norm": 1.1715927124023438,
"learning_rate": 8.868070145350083e-05,
"loss": 5.4003,
"step": 76000
},
{
"epoch": 3.224718627492307,
"grad_norm": 1.1938034296035767,
"learning_rate": 8.859494919178493e-05,
"loss": 5.3952,
"step": 76500
},
{
"epoch": 3.245795219828858,
"grad_norm": 1.243013858795166,
"learning_rate": 8.850919693006904e-05,
"loss": 5.3869,
"step": 77000
},
{
"epoch": 3.2668718121654092,
"grad_norm": 1.2195545434951782,
"learning_rate": 8.842344466835313e-05,
"loss": 5.3798,
"step": 77500
},
{
"epoch": 3.28794840450196,
"grad_norm": 1.1958162784576416,
"learning_rate": 8.833769240663723e-05,
"loss": 5.3745,
"step": 78000
},
{
"epoch": 3.3090249968385113,
"grad_norm": 1.2448384761810303,
"learning_rate": 8.825194014492132e-05,
"loss": 5.3721,
"step": 78500
},
{
"epoch": 3.330101589175062,
"grad_norm": 1.1973018646240234,
"learning_rate": 8.816618788320542e-05,
"loss": 5.3683,
"step": 79000
},
{
"epoch": 3.3511781815116133,
"grad_norm": 1.1759694814682007,
"learning_rate": 8.808043562148952e-05,
"loss": 5.3636,
"step": 79500
},
{
"epoch": 3.372254773848164,
"grad_norm": 1.2234734296798706,
"learning_rate": 8.799468335977362e-05,
"loss": 5.3612,
"step": 80000
},
{
"epoch": 3.372254773848164,
"eval_accuracy": 0.6664779926157723,
"eval_loss": 5.066525459289551,
"eval_runtime": 284.0191,
"eval_samples_per_second": 424.993,
"eval_steps_per_second": 4.429,
"step": 80000
},
{
"epoch": 3.3933313661847153,
"grad_norm": 1.176066517829895,
"learning_rate": 8.790893109805772e-05,
"loss": 5.3575,
"step": 80500
},
{
"epoch": 3.414407958521266,
"grad_norm": 1.1997839212417603,
"learning_rate": 8.782317883634182e-05,
"loss": 5.3512,
"step": 81000
},
{
"epoch": 3.4354845508578173,
"grad_norm": 1.1590864658355713,
"learning_rate": 8.773742657462591e-05,
"loss": 5.3463,
"step": 81500
},
{
"epoch": 3.456561143194368,
"grad_norm": 1.326513648033142,
"learning_rate": 8.765167431291e-05,
"loss": 5.3435,
"step": 82000
},
{
"epoch": 3.4776377355309194,
"grad_norm": 1.250290870666504,
"learning_rate": 8.756592205119411e-05,
"loss": 5.3396,
"step": 82500
},
{
"epoch": 3.4987143278674706,
"grad_norm": 1.1926816701889038,
"learning_rate": 8.748016978947821e-05,
"loss": 5.3367,
"step": 83000
},
{
"epoch": 3.5197909202040214,
"grad_norm": 1.1839298009872437,
"learning_rate": 8.73944175277623e-05,
"loss": 5.3318,
"step": 83500
},
{
"epoch": 3.540867512540572,
"grad_norm": 1.17379891872406,
"learning_rate": 8.730866526604639e-05,
"loss": 5.3313,
"step": 84000
},
{
"epoch": 3.5619441048771234,
"grad_norm": 1.1859002113342285,
"learning_rate": 8.72229130043305e-05,
"loss": 5.3267,
"step": 84500
},
{
"epoch": 3.5830206972136747,
"grad_norm": 1.1998724937438965,
"learning_rate": 8.713716074261459e-05,
"loss": 5.3239,
"step": 85000
},
{
"epoch": 3.6040972895502255,
"grad_norm": 1.1832150220870972,
"learning_rate": 8.705140848089869e-05,
"loss": 5.3212,
"step": 85500
},
{
"epoch": 3.6251738818867767,
"grad_norm": 1.1872453689575195,
"learning_rate": 8.696565621918278e-05,
"loss": 5.3106,
"step": 86000
},
{
"epoch": 3.6462504742233275,
"grad_norm": 1.1462026834487915,
"learning_rate": 8.687990395746688e-05,
"loss": 5.3116,
"step": 86500
},
{
"epoch": 3.6673270665598787,
"grad_norm": 1.175427794456482,
"learning_rate": 8.679415169575098e-05,
"loss": 5.3058,
"step": 87000
},
{
"epoch": 3.6884036588964295,
"grad_norm": 1.1673798561096191,
"learning_rate": 8.670839943403508e-05,
"loss": 5.3043,
"step": 87500
},
{
"epoch": 3.7094802512329808,
"grad_norm": 1.1754493713378906,
"learning_rate": 8.662264717231916e-05,
"loss": 5.3027,
"step": 88000
},
{
"epoch": 3.7305568435695315,
"grad_norm": 1.2064685821533203,
"learning_rate": 8.653689491060328e-05,
"loss": 5.2994,
"step": 88500
},
{
"epoch": 3.751633435906083,
"grad_norm": 1.1514681577682495,
"learning_rate": 8.645114264888737e-05,
"loss": 5.2976,
"step": 89000
},
{
"epoch": 3.7727100282426336,
"grad_norm": 1.1773837804794312,
"learning_rate": 8.636539038717146e-05,
"loss": 5.2898,
"step": 89500
},
{
"epoch": 3.793786620579185,
"grad_norm": 1.1770738363265991,
"learning_rate": 8.627963812545556e-05,
"loss": 5.2881,
"step": 90000
},
{
"epoch": 3.793786620579185,
"eval_accuracy": 0.6763202267587112,
"eval_loss": 4.999698162078857,
"eval_runtime": 304.1852,
"eval_samples_per_second": 396.817,
"eval_steps_per_second": 4.136,
"step": 90000
},
{
"epoch": 3.8148632129157356,
"grad_norm": 1.1322546005249023,
"learning_rate": 8.619388586373967e-05,
"loss": 5.2882,
"step": 90500
},
{
"epoch": 3.835939805252287,
"grad_norm": 1.1570018529891968,
"learning_rate": 8.610813360202375e-05,
"loss": 5.2789,
"step": 91000
},
{
"epoch": 3.857016397588838,
"grad_norm": 1.1710647344589233,
"learning_rate": 8.602238134030785e-05,
"loss": 5.2792,
"step": 91500
},
{
"epoch": 3.878092989925389,
"grad_norm": 1.163998007774353,
"learning_rate": 8.593662907859195e-05,
"loss": 5.2713,
"step": 92000
},
{
"epoch": 3.8991695822619397,
"grad_norm": 1.134411096572876,
"learning_rate": 8.585087681687605e-05,
"loss": 5.2733,
"step": 92500
},
{
"epoch": 3.920246174598491,
"grad_norm": 1.1597387790679932,
"learning_rate": 8.576512455516015e-05,
"loss": 5.2707,
"step": 93000
},
{
"epoch": 3.941322766935042,
"grad_norm": 1.1571540832519531,
"learning_rate": 8.567937229344425e-05,
"loss": 5.2651,
"step": 93500
},
{
"epoch": 3.962399359271593,
"grad_norm": 1.1673835515975952,
"learning_rate": 8.559362003172834e-05,
"loss": 5.2625,
"step": 94000
},
{
"epoch": 3.983475951608144,
"grad_norm": 1.1838788986206055,
"learning_rate": 8.550786777001244e-05,
"loss": 5.2593,
"step": 94500
},
{
"epoch": 4.004552543944695,
"grad_norm": 1.204729676246643,
"learning_rate": 8.542211550829654e-05,
"loss": 5.2551,
"step": 95000
},
{
"epoch": 4.025629136281246,
"grad_norm": 1.1893138885498047,
"learning_rate": 8.533636324658063e-05,
"loss": 5.2435,
"step": 95500
},
{
"epoch": 4.046705728617797,
"grad_norm": 1.1827294826507568,
"learning_rate": 8.525061098486474e-05,
"loss": 5.2481,
"step": 96000
},
{
"epoch": 4.067782320954348,
"grad_norm": 1.2582844495773315,
"learning_rate": 8.516485872314884e-05,
"loss": 5.2418,
"step": 96500
},
{
"epoch": 4.088858913290899,
"grad_norm": 1.2026429176330566,
"learning_rate": 8.507910646143292e-05,
"loss": 5.2404,
"step": 97000
},
{
"epoch": 4.10993550562745,
"grad_norm": 1.1976079940795898,
"learning_rate": 8.499335419971702e-05,
"loss": 5.2363,
"step": 97500
},
{
"epoch": 4.1310120979640015,
"grad_norm": 1.1782751083374023,
"learning_rate": 8.490760193800112e-05,
"loss": 5.2361,
"step": 98000
},
{
"epoch": 4.152088690300552,
"grad_norm": 1.196257472038269,
"learning_rate": 8.482184967628522e-05,
"loss": 5.2333,
"step": 98500
},
{
"epoch": 4.173165282637103,
"grad_norm": 1.1846442222595215,
"learning_rate": 8.473609741456931e-05,
"loss": 5.2339,
"step": 99000
},
{
"epoch": 4.194241874973654,
"grad_norm": 1.5651803016662598,
"learning_rate": 8.46503451528534e-05,
"loss": 5.2323,
"step": 99500
},
{
"epoch": 4.2153184673102055,
"grad_norm": 1.187256932258606,
"learning_rate": 8.456459289113751e-05,
"loss": 5.2269,
"step": 100000
},
{
"epoch": 4.2153184673102055,
"eval_accuracy": 0.683443336064149,
"eval_loss": 4.948752403259277,
"eval_runtime": 309.0764,
"eval_samples_per_second": 390.538,
"eval_steps_per_second": 4.07,
"step": 100000
},
{
"epoch": 4.236395059646756,
"grad_norm": 1.1802533864974976,
"learning_rate": 8.447884062942161e-05,
"loss": 5.228,
"step": 100500
},
{
"epoch": 4.257471651983307,
"grad_norm": 1.1904984712600708,
"learning_rate": 8.43930883677057e-05,
"loss": 5.2265,
"step": 101000
},
{
"epoch": 4.278548244319858,
"grad_norm": 1.2217313051223755,
"learning_rate": 8.430733610598979e-05,
"loss": 5.2197,
"step": 101500
},
{
"epoch": 4.29962483665641,
"grad_norm": 1.1821693181991577,
"learning_rate": 8.42215838442739e-05,
"loss": 5.2172,
"step": 102000
},
{
"epoch": 4.32070142899296,
"grad_norm": 1.1626853942871094,
"learning_rate": 8.413583158255799e-05,
"loss": 5.2173,
"step": 102500
},
{
"epoch": 4.341778021329511,
"grad_norm": 1.158477544784546,
"learning_rate": 8.405007932084209e-05,
"loss": 5.2122,
"step": 103000
},
{
"epoch": 4.362854613666062,
"grad_norm": 1.1607102155685425,
"learning_rate": 8.396432705912619e-05,
"loss": 5.2109,
"step": 103500
},
{
"epoch": 4.383931206002614,
"grad_norm": 1.216323971748352,
"learning_rate": 8.387857479741028e-05,
"loss": 5.2144,
"step": 104000
},
{
"epoch": 4.405007798339165,
"grad_norm": 1.172605037689209,
"learning_rate": 8.379282253569438e-05,
"loss": 5.2096,
"step": 104500
},
{
"epoch": 4.426084390675715,
"grad_norm": 1.1800463199615479,
"learning_rate": 8.370707027397848e-05,
"loss": 5.2054,
"step": 105000
},
{
"epoch": 4.4471609830122665,
"grad_norm": 1.1504831314086914,
"learning_rate": 8.362131801226258e-05,
"loss": 5.2043,
"step": 105500
},
{
"epoch": 4.468237575348818,
"grad_norm": 1.171962022781372,
"learning_rate": 8.353556575054668e-05,
"loss": 5.1998,
"step": 106000
},
{
"epoch": 4.489314167685369,
"grad_norm": 1.187751054763794,
"learning_rate": 8.344981348883077e-05,
"loss": 5.2015,
"step": 106500
},
{
"epoch": 4.510390760021919,
"grad_norm": 1.1708427667617798,
"learning_rate": 8.336406122711486e-05,
"loss": 5.197,
"step": 107000
},
{
"epoch": 4.5314673523584705,
"grad_norm": 1.1949230432510376,
"learning_rate": 8.327830896539897e-05,
"loss": 5.199,
"step": 107500
},
{
"epoch": 4.552543944695022,
"grad_norm": 1.175368309020996,
"learning_rate": 8.319255670368307e-05,
"loss": 5.1937,
"step": 108000
},
{
"epoch": 4.573620537031573,
"grad_norm": 1.1469533443450928,
"learning_rate": 8.310680444196715e-05,
"loss": 5.1915,
"step": 108500
},
{
"epoch": 4.594697129368123,
"grad_norm": 1.1633307933807373,
"learning_rate": 8.302105218025125e-05,
"loss": 5.1842,
"step": 109000
},
{
"epoch": 4.615773721704675,
"grad_norm": 1.206451654434204,
"learning_rate": 8.293529991853536e-05,
"loss": 5.1912,
"step": 109500
},
{
"epoch": 4.636850314041226,
"grad_norm": 1.174948811531067,
"learning_rate": 8.284954765681945e-05,
"loss": 5.1854,
"step": 110000
},
{
"epoch": 4.636850314041226,
"eval_accuracy": 0.6888989544407956,
"eval_loss": 4.9086737632751465,
"eval_runtime": 297.2375,
"eval_samples_per_second": 406.093,
"eval_steps_per_second": 4.232,
"step": 110000
},
{
"epoch": 4.657926906377777,
"grad_norm": 1.2132786512374878,
"learning_rate": 8.276379539510355e-05,
"loss": 5.1832,
"step": 110500
},
{
"epoch": 4.679003498714328,
"grad_norm": 1.1917670965194702,
"learning_rate": 8.267804313338765e-05,
"loss": 5.1826,
"step": 111000
},
{
"epoch": 4.700080091050879,
"grad_norm": 1.2040890455245972,
"learning_rate": 8.259229087167174e-05,
"loss": 5.1783,
"step": 111500
},
{
"epoch": 4.72115668338743,
"grad_norm": 1.2014081478118896,
"learning_rate": 8.250653860995584e-05,
"loss": 5.1761,
"step": 112000
},
{
"epoch": 4.742233275723981,
"grad_norm": 1.1992590427398682,
"learning_rate": 8.242078634823994e-05,
"loss": 5.1745,
"step": 112500
},
{
"epoch": 4.763309868060532,
"grad_norm": 1.1621612310409546,
"learning_rate": 8.233503408652403e-05,
"loss": 5.176,
"step": 113000
},
{
"epoch": 4.784386460397083,
"grad_norm": 1.1899240016937256,
"learning_rate": 8.224928182480814e-05,
"loss": 5.1767,
"step": 113500
},
{
"epoch": 4.805463052733634,
"grad_norm": 1.1715482473373413,
"learning_rate": 8.216352956309224e-05,
"loss": 5.1665,
"step": 114000
},
{
"epoch": 4.826539645070185,
"grad_norm": 1.2448362112045288,
"learning_rate": 8.207777730137632e-05,
"loss": 5.1667,
"step": 114500
},
{
"epoch": 4.847616237406736,
"grad_norm": 1.1955575942993164,
"learning_rate": 8.199202503966042e-05,
"loss": 5.1651,
"step": 115000
},
{
"epoch": 4.868692829743287,
"grad_norm": 1.2460631132125854,
"learning_rate": 8.190627277794453e-05,
"loss": 5.1664,
"step": 115500
},
{
"epoch": 4.889769422079838,
"grad_norm": 1.1886732578277588,
"learning_rate": 8.182052051622862e-05,
"loss": 5.1611,
"step": 116000
},
{
"epoch": 4.910846014416389,
"grad_norm": 1.2117810249328613,
"learning_rate": 8.173476825451271e-05,
"loss": 5.1605,
"step": 116500
},
{
"epoch": 4.9319226067529405,
"grad_norm": 1.1713303327560425,
"learning_rate": 8.164901599279681e-05,
"loss": 5.1629,
"step": 117000
},
{
"epoch": 4.952999199089491,
"grad_norm": 1.2223213911056519,
"learning_rate": 8.156326373108091e-05,
"loss": 5.159,
"step": 117500
},
{
"epoch": 4.974075791426042,
"grad_norm": 1.1646347045898438,
"learning_rate": 8.147751146936501e-05,
"loss": 5.1567,
"step": 118000
},
{
"epoch": 4.995152383762593,
"grad_norm": 1.3836407661437988,
"learning_rate": 8.139175920764911e-05,
"loss": 5.1535,
"step": 118500
},
{
"epoch": 5.0162289760991445,
"grad_norm": 1.1825565099716187,
"learning_rate": 8.13060069459332e-05,
"loss": 5.1553,
"step": 119000
},
{
"epoch": 5.037305568435695,
"grad_norm": 1.1905760765075684,
"learning_rate": 8.12202546842173e-05,
"loss": 5.1464,
"step": 119500
},
{
"epoch": 5.058382160772246,
"grad_norm": 1.2153085470199585,
"learning_rate": 8.11345024225014e-05,
"loss": 5.1474,
"step": 120000
},
{
"epoch": 5.058382160772246,
"eval_accuracy": 0.6943854168207115,
"eval_loss": 4.8727216720581055,
"eval_runtime": 297.1181,
"eval_samples_per_second": 406.256,
"eval_steps_per_second": 4.234,
"step": 120000
},
{
"epoch": 5.079458753108797,
"grad_norm": 1.2292251586914062,
"learning_rate": 8.104875016078549e-05,
"loss": 5.1424,
"step": 120500
},
{
"epoch": 5.100535345445349,
"grad_norm": 1.2002301216125488,
"learning_rate": 8.09629978990696e-05,
"loss": 5.1391,
"step": 121000
},
{
"epoch": 5.1216119377819,
"grad_norm": 1.1873189210891724,
"learning_rate": 8.08772456373537e-05,
"loss": 5.1402,
"step": 121500
},
{
"epoch": 5.14268853011845,
"grad_norm": 1.465786099433899,
"learning_rate": 8.079149337563778e-05,
"loss": 5.1385,
"step": 122000
},
{
"epoch": 5.163765122455001,
"grad_norm": 1.197754144668579,
"learning_rate": 8.070574111392188e-05,
"loss": 5.1355,
"step": 122500
},
{
"epoch": 5.184841714791553,
"grad_norm": 1.1691449880599976,
"learning_rate": 8.061998885220599e-05,
"loss": 5.1343,
"step": 123000
},
{
"epoch": 5.205918307128104,
"grad_norm": 1.167230248451233,
"learning_rate": 8.053423659049008e-05,
"loss": 5.1342,
"step": 123500
},
{
"epoch": 5.226994899464654,
"grad_norm": 1.2020764350891113,
"learning_rate": 8.044848432877418e-05,
"loss": 5.1342,
"step": 124000
},
{
"epoch": 5.2480714918012055,
"grad_norm": 1.158575177192688,
"learning_rate": 8.036273206705827e-05,
"loss": 5.1354,
"step": 124500
},
{
"epoch": 5.269148084137757,
"grad_norm": 1.2063792943954468,
"learning_rate": 8.027697980534237e-05,
"loss": 5.1285,
"step": 125000
},
{
"epoch": 5.290224676474308,
"grad_norm": 1.1922953128814697,
"learning_rate": 8.019122754362647e-05,
"loss": 5.1291,
"step": 125500
},
{
"epoch": 5.311301268810858,
"grad_norm": 1.2150864601135254,
"learning_rate": 8.010547528191057e-05,
"loss": 5.128,
"step": 126000
},
{
"epoch": 5.3323778611474095,
"grad_norm": 1.1848889589309692,
"learning_rate": 8.001972302019465e-05,
"loss": 5.1222,
"step": 126500
},
{
"epoch": 5.353454453483961,
"grad_norm": 1.1560989618301392,
"learning_rate": 7.993397075847876e-05,
"loss": 5.1217,
"step": 127000
},
{
"epoch": 5.374531045820512,
"grad_norm": 1.157912254333496,
"learning_rate": 7.984821849676286e-05,
"loss": 5.125,
"step": 127500
},
{
"epoch": 5.395607638157063,
"grad_norm": 1.206563949584961,
"learning_rate": 7.976246623504695e-05,
"loss": 5.1225,
"step": 128000
},
{
"epoch": 5.416684230493614,
"grad_norm": 1.1995681524276733,
"learning_rate": 7.967671397333105e-05,
"loss": 5.1179,
"step": 128500
},
{
"epoch": 5.437760822830165,
"grad_norm": 1.1490768194198608,
"learning_rate": 7.959096171161516e-05,
"loss": 5.1168,
"step": 129000
},
{
"epoch": 5.458837415166716,
"grad_norm": 1.199142336845398,
"learning_rate": 7.950520944989924e-05,
"loss": 5.1161,
"step": 129500
},
{
"epoch": 5.479914007503266,
"grad_norm": 1.1883599758148193,
"learning_rate": 7.941945718818334e-05,
"loss": 5.1144,
"step": 130000
},
{
"epoch": 5.479914007503266,
"eval_accuracy": 0.6989660965021444,
"eval_loss": 4.840433597564697,
"eval_runtime": 305.6654,
"eval_samples_per_second": 394.896,
"eval_steps_per_second": 4.116,
"step": 130000
},
{
"epoch": 5.500990599839818,
"grad_norm": 1.160034418106079,
"learning_rate": 7.933370492646744e-05,
"loss": 5.1192,
"step": 130500
},
{
"epoch": 5.522067192176369,
"grad_norm": 1.1609240770339966,
"learning_rate": 7.924795266475154e-05,
"loss": 5.1097,
"step": 131000
},
{
"epoch": 5.54314378451292,
"grad_norm": 1.213990569114685,
"learning_rate": 7.916220040303564e-05,
"loss": 5.1069,
"step": 131500
},
{
"epoch": 5.564220376849471,
"grad_norm": 2.005918264389038,
"learning_rate": 7.907644814131972e-05,
"loss": 5.1112,
"step": 132000
},
{
"epoch": 5.585296969186022,
"grad_norm": 1.1713415384292603,
"learning_rate": 7.899069587960383e-05,
"loss": 5.1044,
"step": 132500
},
{
"epoch": 5.606373561522573,
"grad_norm": 1.1729788780212402,
"learning_rate": 7.890494361788793e-05,
"loss": 5.106,
"step": 133000
},
{
"epoch": 5.627450153859124,
"grad_norm": 1.2382289171218872,
"learning_rate": 7.881919135617202e-05,
"loss": 5.1018,
"step": 133500
},
{
"epoch": 5.648526746195675,
"grad_norm": 1.1875584125518799,
"learning_rate": 7.873343909445611e-05,
"loss": 5.0999,
"step": 134000
},
{
"epoch": 5.669603338532226,
"grad_norm": 1.1801625490188599,
"learning_rate": 7.864768683274023e-05,
"loss": 5.1077,
"step": 134500
},
{
"epoch": 5.690679930868777,
"grad_norm": 3.7692482471466064,
"learning_rate": 7.856193457102431e-05,
"loss": 5.1041,
"step": 135000
},
{
"epoch": 5.711756523205328,
"grad_norm": 1.1847504377365112,
"learning_rate": 7.847618230930841e-05,
"loss": 5.0996,
"step": 135500
},
{
"epoch": 5.7328331155418795,
"grad_norm": 1.2074424028396606,
"learning_rate": 7.839043004759251e-05,
"loss": 5.1,
"step": 136000
},
{
"epoch": 5.75390970787843,
"grad_norm": 1.235308289527893,
"learning_rate": 7.83046777858766e-05,
"loss": 5.0962,
"step": 136500
},
{
"epoch": 5.774986300214981,
"grad_norm": 1.1780601739883423,
"learning_rate": 7.82189255241607e-05,
"loss": 5.0941,
"step": 137000
},
{
"epoch": 5.796062892551532,
"grad_norm": 1.1748501062393188,
"learning_rate": 7.81331732624448e-05,
"loss": 5.0957,
"step": 137500
},
{
"epoch": 5.8171394848880835,
"grad_norm": 1.1627594232559204,
"learning_rate": 7.804742100072889e-05,
"loss": 5.0912,
"step": 138000
},
{
"epoch": 5.838216077224635,
"grad_norm": 1.198976993560791,
"learning_rate": 7.7961668739013e-05,
"loss": 5.0951,
"step": 138500
},
{
"epoch": 5.859292669561185,
"grad_norm": 1.2051535844802856,
"learning_rate": 7.78759164772971e-05,
"loss": 5.09,
"step": 139000
},
{
"epoch": 5.880369261897736,
"grad_norm": 1.181911587715149,
"learning_rate": 7.779016421558118e-05,
"loss": 5.087,
"step": 139500
},
{
"epoch": 5.901445854234288,
"grad_norm": 1.1884944438934326,
"learning_rate": 7.770441195386528e-05,
"loss": 5.0914,
"step": 140000
},
{
"epoch": 5.901445854234288,
"eval_accuracy": 0.702577968803671,
"eval_loss": 4.813426494598389,
"eval_runtime": 298.3604,
"eval_samples_per_second": 404.564,
"eval_steps_per_second": 4.216,
"step": 140000
},
{
"epoch": 5.922522446570838,
"grad_norm": 1.1594266891479492,
"learning_rate": 7.761865969214939e-05,
"loss": 5.0809,
"step": 140500
},
{
"epoch": 5.943599038907389,
"grad_norm": 1.1909438371658325,
"learning_rate": 7.753290743043348e-05,
"loss": 5.0853,
"step": 141000
},
{
"epoch": 5.96467563124394,
"grad_norm": 1.195235252380371,
"learning_rate": 7.744715516871758e-05,
"loss": 5.0873,
"step": 141500
},
{
"epoch": 5.985752223580492,
"grad_norm": 1.2524954080581665,
"learning_rate": 7.736140290700167e-05,
"loss": 5.0805,
"step": 142000
},
{
"epoch": 6.006828815917043,
"grad_norm": 1.2422863245010376,
"learning_rate": 7.727565064528577e-05,
"loss": 5.0798,
"step": 142500
},
{
"epoch": 6.027905408253593,
"grad_norm": 1.2124429941177368,
"learning_rate": 7.718989838356987e-05,
"loss": 5.0764,
"step": 143000
},
{
"epoch": 6.0489820005901445,
"grad_norm": 1.2060351371765137,
"learning_rate": 7.710414612185397e-05,
"loss": 5.0766,
"step": 143500
},
{
"epoch": 6.070058592926696,
"grad_norm": 1.2051585912704468,
"learning_rate": 7.701839386013807e-05,
"loss": 5.0724,
"step": 144000
},
{
"epoch": 6.091135185263247,
"grad_norm": 1.1731454133987427,
"learning_rate": 7.693264159842217e-05,
"loss": 5.0708,
"step": 144500
},
{
"epoch": 6.112211777599797,
"grad_norm": 1.1780681610107422,
"learning_rate": 7.684688933670626e-05,
"loss": 5.0703,
"step": 145000
},
{
"epoch": 6.1332883699363485,
"grad_norm": 1.1927410364151,
"learning_rate": 7.676113707499035e-05,
"loss": 5.0719,
"step": 145500
},
{
"epoch": 6.1543649622729,
"grad_norm": 1.2030349969863892,
"learning_rate": 7.667538481327446e-05,
"loss": 5.0705,
"step": 146000
},
{
"epoch": 6.175441554609451,
"grad_norm": 1.2388361692428589,
"learning_rate": 7.658963255155856e-05,
"loss": 5.0707,
"step": 146500
},
{
"epoch": 6.196518146946001,
"grad_norm": 1.1591095924377441,
"learning_rate": 7.650388028984264e-05,
"loss": 5.0662,
"step": 147000
},
{
"epoch": 6.217594739282553,
"grad_norm": 1.2358752489089966,
"learning_rate": 7.641812802812674e-05,
"loss": 5.0652,
"step": 147500
},
{
"epoch": 6.238671331619104,
"grad_norm": 1.1408536434173584,
"learning_rate": 7.633237576641084e-05,
"loss": 5.0639,
"step": 148000
},
{
"epoch": 6.259747923955655,
"grad_norm": 1.2009227275848389,
"learning_rate": 7.624662350469494e-05,
"loss": 5.0665,
"step": 148500
},
{
"epoch": 6.280824516292206,
"grad_norm": 1.1837786436080933,
"learning_rate": 7.616087124297904e-05,
"loss": 5.0647,
"step": 149000
},
{
"epoch": 6.301901108628757,
"grad_norm": 1.2420834302902222,
"learning_rate": 7.607511898126313e-05,
"loss": 5.0633,
"step": 149500
},
{
"epoch": 6.322977700965308,
"grad_norm": 1.2403180599212646,
"learning_rate": 7.598936671954723e-05,
"loss": 5.0579,
"step": 150000
},
{
"epoch": 6.322977700965308,
"eval_accuracy": 0.705775915630452,
"eval_loss": 4.793546199798584,
"eval_runtime": 296.8512,
"eval_samples_per_second": 406.621,
"eval_steps_per_second": 4.238,
"step": 150000
},
{
"epoch": 6.344054293301859,
"grad_norm": 1.1882623434066772,
"learning_rate": 7.590361445783133e-05,
"loss": 5.0581,
"step": 150500
},
{
"epoch": 6.36513088563841,
"grad_norm": 1.1538300514221191,
"learning_rate": 7.581786219611543e-05,
"loss": 5.0584,
"step": 151000
},
{
"epoch": 6.386207477974961,
"grad_norm": 1.1798328161239624,
"learning_rate": 7.573210993439951e-05,
"loss": 5.0577,
"step": 151500
},
{
"epoch": 6.407284070311512,
"grad_norm": 1.1862596273422241,
"learning_rate": 7.564635767268363e-05,
"loss": 5.0566,
"step": 152000
},
{
"epoch": 6.428360662648063,
"grad_norm": 1.215518593788147,
"learning_rate": 7.556060541096772e-05,
"loss": 5.0528,
"step": 152500
},
{
"epoch": 6.449437254984614,
"grad_norm": 1.1789708137512207,
"learning_rate": 7.547485314925181e-05,
"loss": 5.0551,
"step": 153000
},
{
"epoch": 6.470513847321165,
"grad_norm": 1.892587661743164,
"learning_rate": 7.538910088753591e-05,
"loss": 5.0522,
"step": 153500
},
{
"epoch": 6.491590439657716,
"grad_norm": 1.2636245489120483,
"learning_rate": 7.530334862582002e-05,
"loss": 5.0534,
"step": 154000
},
{
"epoch": 6.512667031994267,
"grad_norm": 1.199180245399475,
"learning_rate": 7.52175963641041e-05,
"loss": 5.0535,
"step": 154500
},
{
"epoch": 6.5337436243308185,
"grad_norm": 1.1810476779937744,
"learning_rate": 7.51318441023882e-05,
"loss": 5.0552,
"step": 155000
},
{
"epoch": 6.55482021666737,
"grad_norm": 1.2165743112564087,
"learning_rate": 7.50460918406723e-05,
"loss": 5.05,
"step": 155500
},
{
"epoch": 6.57589680900392,
"grad_norm": 1.2037447690963745,
"learning_rate": 7.49603395789564e-05,
"loss": 5.0486,
"step": 156000
},
{
"epoch": 6.596973401340471,
"grad_norm": 1.1404774188995361,
"learning_rate": 7.48745873172405e-05,
"loss": 5.045,
"step": 156500
},
{
"epoch": 6.6180499936770225,
"grad_norm": 1.2633212804794312,
"learning_rate": 7.47888350555246e-05,
"loss": 5.041,
"step": 157000
},
{
"epoch": 6.639126586013573,
"grad_norm": 1.1896655559539795,
"learning_rate": 7.47030827938087e-05,
"loss": 5.0442,
"step": 157500
},
{
"epoch": 6.660203178350124,
"grad_norm": 1.1758646965026855,
"learning_rate": 7.461733053209279e-05,
"loss": 5.0454,
"step": 158000
},
{
"epoch": 6.681279770686675,
"grad_norm": 1.195341944694519,
"learning_rate": 7.453157827037689e-05,
"loss": 5.0476,
"step": 158500
},
{
"epoch": 6.702356363023227,
"grad_norm": 1.2168692350387573,
"learning_rate": 7.444582600866098e-05,
"loss": 5.0395,
"step": 159000
},
{
"epoch": 6.723432955359778,
"grad_norm": 1.2187185287475586,
"learning_rate": 7.436007374694507e-05,
"loss": 5.043,
"step": 159500
},
{
"epoch": 6.744509547696328,
"grad_norm": 1.22415030002594,
"learning_rate": 7.427432148522919e-05,
"loss": 5.044,
"step": 160000
},
{
"epoch": 6.744509547696328,
"eval_accuracy": 0.7089225487082808,
"eval_loss": 4.773594856262207,
"eval_runtime": 285.9872,
"eval_samples_per_second": 422.068,
"eval_steps_per_second": 4.399,
"step": 160000
},
{
"epoch": 6.765586140032879,
"grad_norm": 1.2073630094528198,
"learning_rate": 7.418856922351327e-05,
"loss": 5.0401,
"step": 160500
},
{
"epoch": 6.786662732369431,
"grad_norm": 1.2008872032165527,
"learning_rate": 7.410281696179737e-05,
"loss": 5.0371,
"step": 161000
},
{
"epoch": 6.807739324705982,
"grad_norm": 1.2028032541275024,
"learning_rate": 7.401706470008147e-05,
"loss": 5.0339,
"step": 161500
},
{
"epoch": 6.828815917042532,
"grad_norm": 1.1570894718170166,
"learning_rate": 7.393131243836557e-05,
"loss": 5.0337,
"step": 162000
},
{
"epoch": 6.8498925093790834,
"grad_norm": 1.1878418922424316,
"learning_rate": 7.384556017664966e-05,
"loss": 5.0318,
"step": 162500
},
{
"epoch": 6.870969101715635,
"grad_norm": 1.1808738708496094,
"learning_rate": 7.375980791493376e-05,
"loss": 5.0298,
"step": 163000
},
{
"epoch": 6.892045694052186,
"grad_norm": 1.2336087226867676,
"learning_rate": 7.367405565321786e-05,
"loss": 5.0292,
"step": 163500
},
{
"epoch": 6.913122286388736,
"grad_norm": 1.1665046215057373,
"learning_rate": 7.358830339150196e-05,
"loss": 5.0308,
"step": 164000
},
{
"epoch": 6.9341988787252875,
"grad_norm": 1.221134066581726,
"learning_rate": 7.350255112978606e-05,
"loss": 5.0341,
"step": 164500
},
{
"epoch": 6.955275471061839,
"grad_norm": 1.1897603273391724,
"learning_rate": 7.341679886807014e-05,
"loss": 5.0304,
"step": 165000
},
{
"epoch": 6.97635206339839,
"grad_norm": 1.2212783098220825,
"learning_rate": 7.333104660635425e-05,
"loss": 5.0272,
"step": 165500
},
{
"epoch": 6.997428655734941,
"grad_norm": 1.1694045066833496,
"learning_rate": 7.324529434463835e-05,
"loss": 5.0289,
"step": 166000
},
{
"epoch": 7.018505248071492,
"grad_norm": 1.2230149507522583,
"learning_rate": 7.315954208292244e-05,
"loss": 5.0241,
"step": 166500
},
{
"epoch": 7.039581840408043,
"grad_norm": 1.1635024547576904,
"learning_rate": 7.307378982120653e-05,
"loss": 5.0184,
"step": 167000
},
{
"epoch": 7.060658432744594,
"grad_norm": 1.18597412109375,
"learning_rate": 7.298803755949063e-05,
"loss": 5.019,
"step": 167500
},
{
"epoch": 7.081735025081145,
"grad_norm": 1.2210392951965332,
"learning_rate": 7.290228529777473e-05,
"loss": 5.0179,
"step": 168000
},
{
"epoch": 7.102811617417696,
"grad_norm": 1.2056702375411987,
"learning_rate": 7.281653303605883e-05,
"loss": 5.0195,
"step": 168500
},
{
"epoch": 7.123888209754247,
"grad_norm": 1.263798475265503,
"learning_rate": 7.273078077434293e-05,
"loss": 5.0239,
"step": 169000
},
{
"epoch": 7.144964802090798,
"grad_norm": 1.214843988418579,
"learning_rate": 7.264502851262703e-05,
"loss": 5.0149,
"step": 169500
},
{
"epoch": 7.166041394427349,
"grad_norm": 1.1923555135726929,
"learning_rate": 7.255927625091112e-05,
"loss": 5.0124,
"step": 170000
},
{
"epoch": 7.166041394427349,
"eval_accuracy": 0.712388886022229,
"eval_loss": 4.753279685974121,
"eval_runtime": 287.306,
"eval_samples_per_second": 420.131,
"eval_steps_per_second": 4.379,
"step": 170000
},
{
"epoch": 7.1871179867639,
"grad_norm": 1.199080467224121,
"learning_rate": 7.247352398919521e-05,
"loss": 5.0165,
"step": 170500
},
{
"epoch": 7.208194579100451,
"grad_norm": 1.1971572637557983,
"learning_rate": 7.238777172747931e-05,
"loss": 5.014,
"step": 171000
},
{
"epoch": 7.229271171437002,
"grad_norm": 1.1801815032958984,
"learning_rate": 7.230201946576342e-05,
"loss": 5.0172,
"step": 171500
},
{
"epoch": 7.250347763773553,
"grad_norm": 1.207678198814392,
"learning_rate": 7.22162672040475e-05,
"loss": 5.0151,
"step": 172000
},
{
"epoch": 7.271424356110104,
"grad_norm": 1.196822166442871,
"learning_rate": 7.21305149423316e-05,
"loss": 5.0126,
"step": 172500
},
{
"epoch": 7.292500948446655,
"grad_norm": 1.1575946807861328,
"learning_rate": 7.20447626806157e-05,
"loss": 5.0075,
"step": 173000
},
{
"epoch": 7.313577540783206,
"grad_norm": 1.224609613418579,
"learning_rate": 7.19590104188998e-05,
"loss": 5.0086,
"step": 173500
},
{
"epoch": 7.3346541331197574,
"grad_norm": 1.185877799987793,
"learning_rate": 7.18732581571839e-05,
"loss": 5.0076,
"step": 174000
},
{
"epoch": 7.355730725456308,
"grad_norm": 1.1947156190872192,
"learning_rate": 7.1787505895468e-05,
"loss": 5.0074,
"step": 174500
},
{
"epoch": 7.376807317792859,
"grad_norm": 1.232859492301941,
"learning_rate": 7.17017536337521e-05,
"loss": 5.0085,
"step": 175000
},
{
"epoch": 7.39788391012941,
"grad_norm": 1.1783876419067383,
"learning_rate": 7.161600137203619e-05,
"loss": 5.0105,
"step": 175500
},
{
"epoch": 7.4189605024659615,
"grad_norm": 1.210338830947876,
"learning_rate": 7.153024911032029e-05,
"loss": 5.0038,
"step": 176000
},
{
"epoch": 7.440037094802513,
"grad_norm": 1.3607730865478516,
"learning_rate": 7.144449684860438e-05,
"loss": 5.0074,
"step": 176500
},
{
"epoch": 7.461113687139063,
"grad_norm": 1.2329246997833252,
"learning_rate": 7.135874458688849e-05,
"loss": 5.0084,
"step": 177000
},
{
"epoch": 7.482190279475614,
"grad_norm": 1.2359158992767334,
"learning_rate": 7.127299232517259e-05,
"loss": 5.007,
"step": 177500
},
{
"epoch": 7.503266871812166,
"grad_norm": 1.1867552995681763,
"learning_rate": 7.118724006345667e-05,
"loss": 5.0038,
"step": 178000
},
{
"epoch": 7.524343464148717,
"grad_norm": 1.2137891054153442,
"learning_rate": 7.110148780174077e-05,
"loss": 4.9989,
"step": 178500
},
{
"epoch": 7.545420056485267,
"grad_norm": 1.2216790914535522,
"learning_rate": 7.101573554002488e-05,
"loss": 5.0,
"step": 179000
},
{
"epoch": 7.566496648821818,
"grad_norm": 1.2051630020141602,
"learning_rate": 7.092998327830897e-05,
"loss": 5.005,
"step": 179500
},
{
"epoch": 7.58757324115837,
"grad_norm": 1.5784205198287964,
"learning_rate": 7.084423101659306e-05,
"loss": 4.9997,
"step": 180000
},
{
"epoch": 7.58757324115837,
"eval_accuracy": 0.71398575919604,
"eval_loss": 4.743103504180908,
"eval_runtime": 288.0118,
"eval_samples_per_second": 419.101,
"eval_steps_per_second": 4.368,
"step": 180000
},
{
"epoch": 7.608649833494921,
"grad_norm": 1.2161498069763184,
"learning_rate": 7.075847875487716e-05,
"loss": 4.9987,
"step": 180500
},
{
"epoch": 7.629726425831471,
"grad_norm": 1.2316025495529175,
"learning_rate": 7.067272649316126e-05,
"loss": 5.0006,
"step": 181000
},
{
"epoch": 7.650803018168022,
"grad_norm": 1.1802855730056763,
"learning_rate": 7.058697423144536e-05,
"loss": 4.9949,
"step": 181500
},
{
"epoch": 7.671879610504574,
"grad_norm": 1.1895751953125,
"learning_rate": 7.050122196972946e-05,
"loss": 4.9956,
"step": 182000
},
{
"epoch": 7.692956202841125,
"grad_norm": 1.216373324394226,
"learning_rate": 7.041546970801354e-05,
"loss": 4.9955,
"step": 182500
},
{
"epoch": 7.714032795177676,
"grad_norm": 1.2029701471328735,
"learning_rate": 7.032971744629765e-05,
"loss": 4.9955,
"step": 183000
},
{
"epoch": 7.7351093875142265,
"grad_norm": 1.1813932657241821,
"learning_rate": 7.024396518458175e-05,
"loss": 4.9984,
"step": 183500
},
{
"epoch": 7.756185979850778,
"grad_norm": 1.2041866779327393,
"learning_rate": 7.015821292286584e-05,
"loss": 4.9942,
"step": 184000
},
{
"epoch": 7.777262572187329,
"grad_norm": 1.2447139024734497,
"learning_rate": 7.007246066114993e-05,
"loss": 4.9936,
"step": 184500
},
{
"epoch": 7.798339164523879,
"grad_norm": 1.2005633115768433,
"learning_rate": 6.998670839943405e-05,
"loss": 4.9922,
"step": 185000
},
{
"epoch": 7.8194157568604306,
"grad_norm": 1.2316288948059082,
"learning_rate": 6.990095613771813e-05,
"loss": 4.9899,
"step": 185500
},
{
"epoch": 7.840492349196982,
"grad_norm": 1.2141095399856567,
"learning_rate": 6.981520387600223e-05,
"loss": 4.9909,
"step": 186000
},
{
"epoch": 7.861568941533533,
"grad_norm": 1.1894772052764893,
"learning_rate": 6.972945161428633e-05,
"loss": 4.9891,
"step": 186500
},
{
"epoch": 7.882645533870084,
"grad_norm": 1.174543023109436,
"learning_rate": 6.964369935257043e-05,
"loss": 4.9902,
"step": 187000
},
{
"epoch": 7.903722126206635,
"grad_norm": 1.1652605533599854,
"learning_rate": 6.955794709085452e-05,
"loss": 4.9924,
"step": 187500
},
{
"epoch": 7.924798718543186,
"grad_norm": 1.1803480386734009,
"learning_rate": 6.947219482913862e-05,
"loss": 4.9794,
"step": 188000
},
{
"epoch": 7.945875310879737,
"grad_norm": 1.2102735042572021,
"learning_rate": 6.938644256742272e-05,
"loss": 4.9878,
"step": 188500
},
{
"epoch": 7.966951903216288,
"grad_norm": 1.2214703559875488,
"learning_rate": 6.930069030570682e-05,
"loss": 4.9856,
"step": 189000
},
{
"epoch": 7.988028495552839,
"grad_norm": 1.1654741764068604,
"learning_rate": 6.921493804399092e-05,
"loss": 4.9816,
"step": 189500
},
{
"epoch": 8.00910508788939,
"grad_norm": 1.260227084159851,
"learning_rate": 6.9129185782275e-05,
"loss": 4.9809,
"step": 190000
},
{
"epoch": 8.00910508788939,
"eval_accuracy": 0.7163024714084301,
"eval_loss": 4.722395420074463,
"eval_runtime": 283.8008,
"eval_samples_per_second": 425.319,
"eval_steps_per_second": 4.433,
"step": 190000
},
{
"epoch": 8.03018168022594,
"grad_norm": 1.2019314765930176,
"learning_rate": 6.904343352055911e-05,
"loss": 4.9764,
"step": 190500
},
{
"epoch": 8.051258272562492,
"grad_norm": 1.2193270921707153,
"learning_rate": 6.895768125884321e-05,
"loss": 4.98,
"step": 191000
},
{
"epoch": 8.072334864899043,
"grad_norm": 1.1981379985809326,
"learning_rate": 6.88719289971273e-05,
"loss": 4.977,
"step": 191500
},
{
"epoch": 8.093411457235595,
"grad_norm": 1.2269775867462158,
"learning_rate": 6.87861767354114e-05,
"loss": 4.9718,
"step": 192000
},
{
"epoch": 8.114488049572145,
"grad_norm": 1.2561153173446655,
"learning_rate": 6.870042447369551e-05,
"loss": 4.9785,
"step": 192500
},
{
"epoch": 8.135564641908696,
"grad_norm": 1.276261329650879,
"learning_rate": 6.861467221197959e-05,
"loss": 4.9771,
"step": 193000
},
{
"epoch": 8.156641234245248,
"grad_norm": 1.2613970041275024,
"learning_rate": 6.852891995026369e-05,
"loss": 4.9778,
"step": 193500
},
{
"epoch": 8.177717826581798,
"grad_norm": 1.1926409006118774,
"learning_rate": 6.844316768854779e-05,
"loss": 4.9784,
"step": 194000
},
{
"epoch": 8.19879441891835,
"grad_norm": 1.2153599262237549,
"learning_rate": 6.835741542683189e-05,
"loss": 4.9753,
"step": 194500
},
{
"epoch": 8.2198710112549,
"grad_norm": 1.240102767944336,
"learning_rate": 6.827166316511599e-05,
"loss": 4.9704,
"step": 195000
},
{
"epoch": 8.24094760359145,
"grad_norm": 1.2002017498016357,
"learning_rate": 6.818591090340008e-05,
"loss": 4.9752,
"step": 195500
},
{
"epoch": 8.262024195928003,
"grad_norm": 1.1688752174377441,
"learning_rate": 6.810015864168417e-05,
"loss": 4.9706,
"step": 196000
},
{
"epoch": 8.283100788264553,
"grad_norm": 1.2108333110809326,
"learning_rate": 6.801440637996828e-05,
"loss": 4.9686,
"step": 196500
},
{
"epoch": 8.304177380601104,
"grad_norm": 1.2194222211837769,
"learning_rate": 6.792865411825238e-05,
"loss": 4.9724,
"step": 197000
},
{
"epoch": 8.325253972937656,
"grad_norm": 1.185798168182373,
"learning_rate": 6.784290185653646e-05,
"loss": 4.9696,
"step": 197500
},
{
"epoch": 8.346330565274206,
"grad_norm": 1.2635873556137085,
"learning_rate": 6.775714959482056e-05,
"loss": 4.9685,
"step": 198000
},
{
"epoch": 8.367407157610758,
"grad_norm": 1.1899186372756958,
"learning_rate": 6.767139733310467e-05,
"loss": 4.971,
"step": 198500
},
{
"epoch": 8.388483749947309,
"grad_norm": 1.2183247804641724,
"learning_rate": 6.758564507138876e-05,
"loss": 4.9725,
"step": 199000
},
{
"epoch": 8.409560342283859,
"grad_norm": 1.226076602935791,
"learning_rate": 6.749989280967286e-05,
"loss": 4.9715,
"step": 199500
},
{
"epoch": 8.430636934620411,
"grad_norm": 1.2133668661117554,
"learning_rate": 6.741414054795696e-05,
"loss": 4.9674,
"step": 200000
},
{
"epoch": 8.430636934620411,
"eval_accuracy": 0.7188661165895447,
"eval_loss": 4.708712100982666,
"eval_runtime": 284.3475,
"eval_samples_per_second": 424.502,
"eval_steps_per_second": 4.424,
"step": 200000
},
{
"epoch": 8.451713526956961,
"grad_norm": 1.1907964944839478,
"learning_rate": 6.732838828624105e-05,
"loss": 4.9658,
"step": 200500
},
{
"epoch": 8.472790119293512,
"grad_norm": 1.2309699058532715,
"learning_rate": 6.724263602452515e-05,
"loss": 4.9666,
"step": 201000
},
{
"epoch": 8.493866711630064,
"grad_norm": 1.2191189527511597,
"learning_rate": 6.715688376280924e-05,
"loss": 4.9644,
"step": 201500
},
{
"epoch": 8.514943303966614,
"grad_norm": 1.2087974548339844,
"learning_rate": 6.707113150109335e-05,
"loss": 4.9634,
"step": 202000
},
{
"epoch": 8.536019896303166,
"grad_norm": 1.2400072813034058,
"learning_rate": 6.698537923937745e-05,
"loss": 4.9689,
"step": 202500
},
{
"epoch": 8.557096488639717,
"grad_norm": 1.2195491790771484,
"learning_rate": 6.689962697766153e-05,
"loss": 4.9639,
"step": 203000
},
{
"epoch": 8.578173080976267,
"grad_norm": 1.2442820072174072,
"learning_rate": 6.681387471594563e-05,
"loss": 4.9634,
"step": 203500
},
{
"epoch": 8.59924967331282,
"grad_norm": 1.3057812452316284,
"learning_rate": 6.672812245422974e-05,
"loss": 4.9632,
"step": 204000
},
{
"epoch": 8.62032626564937,
"grad_norm": 1.199554204940796,
"learning_rate": 6.664237019251383e-05,
"loss": 4.963,
"step": 204500
},
{
"epoch": 8.64140285798592,
"grad_norm": 1.4804085493087769,
"learning_rate": 6.655661793079792e-05,
"loss": 4.9633,
"step": 205000
},
{
"epoch": 8.662479450322472,
"grad_norm": 1.2386500835418701,
"learning_rate": 6.647086566908202e-05,
"loss": 4.9586,
"step": 205500
},
{
"epoch": 8.683556042659022,
"grad_norm": 1.2068346738815308,
"learning_rate": 6.638511340736612e-05,
"loss": 4.9579,
"step": 206000
},
{
"epoch": 8.704632634995574,
"grad_norm": 1.2305176258087158,
"learning_rate": 6.629936114565022e-05,
"loss": 4.9552,
"step": 206500
},
{
"epoch": 8.725709227332125,
"grad_norm": 1.250844120979309,
"learning_rate": 6.621360888393432e-05,
"loss": 4.9573,
"step": 207000
},
{
"epoch": 8.746785819668675,
"grad_norm": 1.1921871900558472,
"learning_rate": 6.61278566222184e-05,
"loss": 4.951,
"step": 207500
},
{
"epoch": 8.767862412005227,
"grad_norm": 1.185805320739746,
"learning_rate": 6.604210436050251e-05,
"loss": 4.9554,
"step": 208000
},
{
"epoch": 8.788939004341778,
"grad_norm": 1.1795355081558228,
"learning_rate": 6.595635209878661e-05,
"loss": 4.9569,
"step": 208500
},
{
"epoch": 8.81001559667833,
"grad_norm": 1.1965314149856567,
"learning_rate": 6.58705998370707e-05,
"loss": 4.96,
"step": 209000
},
{
"epoch": 8.83109218901488,
"grad_norm": 1.2050001621246338,
"learning_rate": 6.57848475753548e-05,
"loss": 4.9561,
"step": 209500
},
{
"epoch": 8.85216878135143,
"grad_norm": 1.210486888885498,
"learning_rate": 6.569909531363891e-05,
"loss": 4.9526,
"step": 210000
},
{
"epoch": 8.85216878135143,
"eval_accuracy": 0.720603312531913,
"eval_loss": 4.69759464263916,
"eval_runtime": 283.2126,
"eval_samples_per_second": 426.203,
"eval_steps_per_second": 4.442,
"step": 210000
},
{
"epoch": 8.873245373687983,
"grad_norm": 1.2181248664855957,
"learning_rate": 6.561334305192299e-05,
"loss": 4.953,
"step": 210500
},
{
"epoch": 8.894321966024533,
"grad_norm": 1.2479008436203003,
"learning_rate": 6.552759079020709e-05,
"loss": 4.955,
"step": 211000
},
{
"epoch": 8.915398558361083,
"grad_norm": 1.2124124765396118,
"learning_rate": 6.544183852849119e-05,
"loss": 4.9496,
"step": 211500
},
{
"epoch": 8.936475150697635,
"grad_norm": 1.1955808401107788,
"learning_rate": 6.535608626677529e-05,
"loss": 4.9573,
"step": 212000
},
{
"epoch": 8.957551743034186,
"grad_norm": 1.2251760959625244,
"learning_rate": 6.527033400505939e-05,
"loss": 4.9549,
"step": 212500
},
{
"epoch": 8.978628335370738,
"grad_norm": 1.2039116621017456,
"learning_rate": 6.518458174334348e-05,
"loss": 4.9546,
"step": 213000
},
{
"epoch": 8.999704927707288,
"grad_norm": 1.2311803102493286,
"learning_rate": 6.509882948162758e-05,
"loss": 4.9513,
"step": 213500
},
{
"epoch": 9.020781520043839,
"grad_norm": 1.3742305040359497,
"learning_rate": 6.501307721991168e-05,
"loss": 4.947,
"step": 214000
},
{
"epoch": 9.04185811238039,
"grad_norm": 1.2510732412338257,
"learning_rate": 6.492732495819578e-05,
"loss": 4.9452,
"step": 214500
},
{
"epoch": 9.062934704716941,
"grad_norm": 1.2256864309310913,
"learning_rate": 6.484157269647986e-05,
"loss": 4.9471,
"step": 215000
},
{
"epoch": 9.084011297053493,
"grad_norm": 1.2123686075210571,
"learning_rate": 6.475582043476398e-05,
"loss": 4.9474,
"step": 215500
},
{
"epoch": 9.105087889390044,
"grad_norm": 1.2280921936035156,
"learning_rate": 6.467006817304807e-05,
"loss": 4.9426,
"step": 216000
},
{
"epoch": 9.126164481726594,
"grad_norm": 1.191518783569336,
"learning_rate": 6.458431591133216e-05,
"loss": 4.9368,
"step": 216500
},
{
"epoch": 9.147241074063146,
"grad_norm": 1.2150710821151733,
"learning_rate": 6.449856364961626e-05,
"loss": 4.9422,
"step": 217000
},
{
"epoch": 9.168317666399696,
"grad_norm": 1.2126309871673584,
"learning_rate": 6.441281138790037e-05,
"loss": 4.9375,
"step": 217500
},
{
"epoch": 9.189394258736247,
"grad_norm": 1.2119472026824951,
"learning_rate": 6.432705912618445e-05,
"loss": 4.9405,
"step": 218000
},
{
"epoch": 9.210470851072799,
"grad_norm": 1.1966376304626465,
"learning_rate": 6.424130686446855e-05,
"loss": 4.9421,
"step": 218500
},
{
"epoch": 9.23154744340935,
"grad_norm": 1.269813060760498,
"learning_rate": 6.415555460275265e-05,
"loss": 4.9402,
"step": 219000
},
{
"epoch": 9.252624035745901,
"grad_norm": 1.2053613662719727,
"learning_rate": 6.406980234103675e-05,
"loss": 4.9409,
"step": 219500
},
{
"epoch": 9.273700628082452,
"grad_norm": 1.2345422506332397,
"learning_rate": 6.398405007932085e-05,
"loss": 4.9422,
"step": 220000
},
{
"epoch": 9.273700628082452,
"eval_accuracy": 0.7224109621486899,
"eval_loss": 4.684810638427734,
"eval_runtime": 284.8872,
"eval_samples_per_second": 423.698,
"eval_steps_per_second": 4.416,
"step": 220000
},
{
"epoch": 9.294777220419002,
"grad_norm": 1.2063605785369873,
"learning_rate": 6.389829781760495e-05,
"loss": 4.94,
"step": 220500
},
{
"epoch": 9.315853812755554,
"grad_norm": 1.2543971538543701,
"learning_rate": 6.381254555588903e-05,
"loss": 4.9413,
"step": 221000
},
{
"epoch": 9.336930405092104,
"grad_norm": 1.22481107711792,
"learning_rate": 6.372679329417314e-05,
"loss": 4.9356,
"step": 221500
},
{
"epoch": 9.358006997428657,
"grad_norm": 1.215613842010498,
"learning_rate": 6.364104103245724e-05,
"loss": 4.9351,
"step": 222000
},
{
"epoch": 9.379083589765207,
"grad_norm": 1.1932727098464966,
"learning_rate": 6.355528877074132e-05,
"loss": 4.9387,
"step": 222500
},
{
"epoch": 9.400160182101757,
"grad_norm": 1.2185845375061035,
"learning_rate": 6.346953650902542e-05,
"loss": 4.937,
"step": 223000
},
{
"epoch": 9.42123677443831,
"grad_norm": 1.2246685028076172,
"learning_rate": 6.338378424730954e-05,
"loss": 4.936,
"step": 223500
},
{
"epoch": 9.44231336677486,
"grad_norm": 1.2069498300552368,
"learning_rate": 6.329803198559362e-05,
"loss": 4.9324,
"step": 224000
},
{
"epoch": 9.46338995911141,
"grad_norm": 1.2347995042800903,
"learning_rate": 6.321227972387772e-05,
"loss": 4.9315,
"step": 224500
},
{
"epoch": 9.484466551447962,
"grad_norm": 1.2425357103347778,
"learning_rate": 6.312652746216182e-05,
"loss": 4.9287,
"step": 225000
},
{
"epoch": 9.505543143784513,
"grad_norm": 1.204967737197876,
"learning_rate": 6.304077520044591e-05,
"loss": 4.9388,
"step": 225500
},
{
"epoch": 9.526619736121065,
"grad_norm": 1.2299506664276123,
"learning_rate": 6.295502293873001e-05,
"loss": 4.932,
"step": 226000
},
{
"epoch": 9.547696328457615,
"grad_norm": 1.2519145011901855,
"learning_rate": 6.286927067701411e-05,
"loss": 4.9289,
"step": 226500
},
{
"epoch": 9.568772920794165,
"grad_norm": 1.22359037399292,
"learning_rate": 6.278351841529821e-05,
"loss": 4.9319,
"step": 227000
},
{
"epoch": 9.589849513130718,
"grad_norm": 1.2176817655563354,
"learning_rate": 6.269776615358231e-05,
"loss": 4.9308,
"step": 227500
},
{
"epoch": 9.610926105467268,
"grad_norm": 1.212934136390686,
"learning_rate": 6.26120138918664e-05,
"loss": 4.9296,
"step": 228000
},
{
"epoch": 9.63200269780382,
"grad_norm": 1.2088513374328613,
"learning_rate": 6.252626163015049e-05,
"loss": 4.9287,
"step": 228500
},
{
"epoch": 9.65307929014037,
"grad_norm": 1.2101627588272095,
"learning_rate": 6.24405093684346e-05,
"loss": 4.9268,
"step": 229000
},
{
"epoch": 9.67415588247692,
"grad_norm": 1.1800097227096558,
"learning_rate": 6.23547571067187e-05,
"loss": 4.9259,
"step": 229500
},
{
"epoch": 9.695232474813473,
"grad_norm": 1.2317137718200684,
"learning_rate": 6.226900484500279e-05,
"loss": 4.9232,
"step": 230000
},
{
"epoch": 9.695232474813473,
"eval_accuracy": 0.724169210745622,
"eval_loss": 4.6720871925354,
"eval_runtime": 283.362,
"eval_samples_per_second": 425.978,
"eval_steps_per_second": 4.44,
"step": 230000
},
{
"epoch": 9.716309067150023,
"grad_norm": 1.224730372428894,
"learning_rate": 6.218325258328688e-05,
"loss": 4.9323,
"step": 230500
},
{
"epoch": 9.737385659486574,
"grad_norm": 1.1969786882400513,
"learning_rate": 6.209750032157098e-05,
"loss": 4.9274,
"step": 231000
},
{
"epoch": 9.758462251823126,
"grad_norm": 1.2646855115890503,
"learning_rate": 6.201174805985508e-05,
"loss": 4.9251,
"step": 231500
},
{
"epoch": 9.779538844159676,
"grad_norm": 1.2864656448364258,
"learning_rate": 6.192599579813918e-05,
"loss": 4.9254,
"step": 232000
},
{
"epoch": 9.800615436496226,
"grad_norm": 1.202122688293457,
"learning_rate": 6.184024353642328e-05,
"loss": 4.9238,
"step": 232500
},
{
"epoch": 9.821692028832778,
"grad_norm": 1.205916166305542,
"learning_rate": 6.175449127470738e-05,
"loss": 4.9208,
"step": 233000
},
{
"epoch": 9.842768621169329,
"grad_norm": 1.2133389711380005,
"learning_rate": 6.166873901299147e-05,
"loss": 4.9263,
"step": 233500
},
{
"epoch": 9.863845213505881,
"grad_norm": 1.1908735036849976,
"learning_rate": 6.158298675127557e-05,
"loss": 4.9209,
"step": 234000
},
{
"epoch": 9.884921805842431,
"grad_norm": 1.221973180770874,
"learning_rate": 6.149723448955966e-05,
"loss": 4.9245,
"step": 234500
},
{
"epoch": 9.905998398178982,
"grad_norm": 1.2121721506118774,
"learning_rate": 6.141148222784377e-05,
"loss": 4.9181,
"step": 235000
},
{
"epoch": 9.927074990515534,
"grad_norm": 1.2001324892044067,
"learning_rate": 6.132572996612787e-05,
"loss": 4.9187,
"step": 235500
},
{
"epoch": 9.948151582852084,
"grad_norm": 1.2283519506454468,
"learning_rate": 6.123997770441195e-05,
"loss": 4.9209,
"step": 236000
},
{
"epoch": 9.969228175188636,
"grad_norm": 1.2340632677078247,
"learning_rate": 6.115422544269605e-05,
"loss": 4.9252,
"step": 236500
},
{
"epoch": 9.990304767525187,
"grad_norm": 1.210125207901001,
"learning_rate": 6.106847318098015e-05,
"loss": 4.9218,
"step": 237000
},
{
"epoch": 10.011381359861737,
"grad_norm": 1.1647775173187256,
"learning_rate": 6.098272091926425e-05,
"loss": 4.9199,
"step": 237500
},
{
"epoch": 10.032457952198289,
"grad_norm": 1.2153302431106567,
"learning_rate": 6.0896968657548345e-05,
"loss": 4.9163,
"step": 238000
},
{
"epoch": 10.05353454453484,
"grad_norm": 1.2124123573303223,
"learning_rate": 6.081121639583244e-05,
"loss": 4.9127,
"step": 238500
},
{
"epoch": 10.07461113687139,
"grad_norm": 1.2328394651412964,
"learning_rate": 6.072546413411654e-05,
"loss": 4.9143,
"step": 239000
},
{
"epoch": 10.095687729207942,
"grad_norm": 1.2011830806732178,
"learning_rate": 6.063971187240064e-05,
"loss": 4.915,
"step": 239500
},
{
"epoch": 10.116764321544492,
"grad_norm": 1.189602255821228,
"learning_rate": 6.055395961068473e-05,
"loss": 4.9073,
"step": 240000
},
{
"epoch": 10.116764321544492,
"eval_accuracy": 0.7260845106937713,
"eval_loss": 4.664577960968018,
"eval_runtime": 288.7196,
"eval_samples_per_second": 418.073,
"eval_steps_per_second": 4.357,
"step": 240000
},
{
"epoch": 10.137840913881044,
"grad_norm": 1.2191969156265259,
"learning_rate": 6.046820734896883e-05,
"loss": 4.9101,
"step": 240500
},
{
"epoch": 10.158917506217595,
"grad_norm": 1.2503254413604736,
"learning_rate": 6.038245508725293e-05,
"loss": 4.9126,
"step": 241000
},
{
"epoch": 10.179994098554145,
"grad_norm": 1.1884126663208008,
"learning_rate": 6.029670282553702e-05,
"loss": 4.9106,
"step": 241500
},
{
"epoch": 10.201070690890697,
"grad_norm": 1.2082287073135376,
"learning_rate": 6.0210950563821125e-05,
"loss": 4.9133,
"step": 242000
},
{
"epoch": 10.222147283227248,
"grad_norm": 1.2135977745056152,
"learning_rate": 6.0125198302105223e-05,
"loss": 4.9108,
"step": 242500
},
{
"epoch": 10.2432238755638,
"grad_norm": 1.2264249324798584,
"learning_rate": 6.0039446040389315e-05,
"loss": 4.9084,
"step": 243000
},
{
"epoch": 10.26430046790035,
"grad_norm": 1.2336091995239258,
"learning_rate": 5.995369377867341e-05,
"loss": 4.9177,
"step": 243500
},
{
"epoch": 10.2853770602369,
"grad_norm": 1.2173370122909546,
"learning_rate": 5.986794151695752e-05,
"loss": 4.9059,
"step": 244000
},
{
"epoch": 10.306453652573452,
"grad_norm": 1.2234634160995483,
"learning_rate": 5.97821892552416e-05,
"loss": 4.9117,
"step": 244500
},
{
"epoch": 10.327530244910003,
"grad_norm": 1.1887719631195068,
"learning_rate": 5.969643699352571e-05,
"loss": 4.9102,
"step": 245000
},
{
"epoch": 10.348606837246553,
"grad_norm": 1.2119560241699219,
"learning_rate": 5.9610684731809806e-05,
"loss": 4.9091,
"step": 245500
},
{
"epoch": 10.369683429583105,
"grad_norm": 1.1805379390716553,
"learning_rate": 5.95249324700939e-05,
"loss": 4.9068,
"step": 246000
},
{
"epoch": 10.390760021919656,
"grad_norm": 1.2259935140609741,
"learning_rate": 5.9439180208377996e-05,
"loss": 4.9075,
"step": 246500
},
{
"epoch": 10.411836614256208,
"grad_norm": 1.1796971559524536,
"learning_rate": 5.93534279466621e-05,
"loss": 4.9053,
"step": 247000
},
{
"epoch": 10.432913206592758,
"grad_norm": 1.236948013305664,
"learning_rate": 5.9267675684946186e-05,
"loss": 4.9101,
"step": 247500
},
{
"epoch": 10.453989798929308,
"grad_norm": 1.232825517654419,
"learning_rate": 5.918192342323029e-05,
"loss": 4.9045,
"step": 248000
},
{
"epoch": 10.47506639126586,
"grad_norm": 1.2062400579452515,
"learning_rate": 5.909617116151439e-05,
"loss": 4.9049,
"step": 248500
},
{
"epoch": 10.496142983602411,
"grad_norm": 1.2676557302474976,
"learning_rate": 5.901041889979848e-05,
"loss": 4.9029,
"step": 249000
},
{
"epoch": 10.517219575938963,
"grad_norm": 2.300474166870117,
"learning_rate": 5.892466663808258e-05,
"loss": 4.9039,
"step": 249500
},
{
"epoch": 10.538296168275513,
"grad_norm": 1.217051386833191,
"learning_rate": 5.8838914376366684e-05,
"loss": 4.9072,
"step": 250000
},
{
"epoch": 10.538296168275513,
"eval_accuracy": 0.7276299860230017,
"eval_loss": 4.650235176086426,
"eval_runtime": 284.4126,
"eval_samples_per_second": 424.405,
"eval_steps_per_second": 4.423,
"step": 250000
},
{
"epoch": 10.559372760612064,
"grad_norm": 1.2072945833206177,
"learning_rate": 5.8753162114650776e-05,
"loss": 4.9106,
"step": 250500
},
{
"epoch": 10.580449352948616,
"grad_norm": 1.2472292184829712,
"learning_rate": 5.8667409852934874e-05,
"loss": 4.9016,
"step": 251000
},
{
"epoch": 10.601525945285166,
"grad_norm": 1.213215947151184,
"learning_rate": 5.858165759121897e-05,
"loss": 4.899,
"step": 251500
},
{
"epoch": 10.622602537621717,
"grad_norm": 1.2344470024108887,
"learning_rate": 5.8495905329503064e-05,
"loss": 4.9052,
"step": 252000
},
{
"epoch": 10.643679129958269,
"grad_norm": 1.2523607015609741,
"learning_rate": 5.841015306778716e-05,
"loss": 4.9034,
"step": 252500
},
{
"epoch": 10.664755722294819,
"grad_norm": 1.1948623657226562,
"learning_rate": 5.832440080607127e-05,
"loss": 4.9021,
"step": 253000
},
{
"epoch": 10.685832314631371,
"grad_norm": 1.255079984664917,
"learning_rate": 5.823864854435536e-05,
"loss": 4.8959,
"step": 253500
},
{
"epoch": 10.706908906967922,
"grad_norm": 1.242619276046753,
"learning_rate": 5.815289628263946e-05,
"loss": 4.9004,
"step": 254000
},
{
"epoch": 10.727985499304472,
"grad_norm": 1.2049857378005981,
"learning_rate": 5.8067144020923556e-05,
"loss": 4.9052,
"step": 254500
},
{
"epoch": 10.749062091641024,
"grad_norm": 1.2536211013793945,
"learning_rate": 5.798139175920765e-05,
"loss": 4.8993,
"step": 255000
},
{
"epoch": 10.770138683977574,
"grad_norm": 1.2596551179885864,
"learning_rate": 5.789563949749175e-05,
"loss": 4.9016,
"step": 255500
},
{
"epoch": 10.791215276314126,
"grad_norm": 1.209492564201355,
"learning_rate": 5.780988723577585e-05,
"loss": 4.8984,
"step": 256000
},
{
"epoch": 10.812291868650677,
"grad_norm": 1.207811951637268,
"learning_rate": 5.772413497405994e-05,
"loss": 4.8986,
"step": 256500
},
{
"epoch": 10.833368460987227,
"grad_norm": 1.250611424446106,
"learning_rate": 5.763838271234404e-05,
"loss": 4.8935,
"step": 257000
},
{
"epoch": 10.85444505332378,
"grad_norm": 1.2168982028961182,
"learning_rate": 5.7552630450628146e-05,
"loss": 4.8983,
"step": 257500
},
{
"epoch": 10.87552164566033,
"grad_norm": 1.2053166627883911,
"learning_rate": 5.746687818891223e-05,
"loss": 4.895,
"step": 258000
},
{
"epoch": 10.89659823799688,
"grad_norm": 1.2407495975494385,
"learning_rate": 5.7381125927196335e-05,
"loss": 4.8966,
"step": 258500
},
{
"epoch": 10.917674830333432,
"grad_norm": 1.2330180406570435,
"learning_rate": 5.7295373665480434e-05,
"loss": 4.8965,
"step": 259000
},
{
"epoch": 10.938751422669982,
"grad_norm": 1.2390776872634888,
"learning_rate": 5.7209621403764525e-05,
"loss": 4.8948,
"step": 259500
},
{
"epoch": 10.959828015006533,
"grad_norm": 1.25767982006073,
"learning_rate": 5.7123869142048624e-05,
"loss": 4.8916,
"step": 260000
},
{
"epoch": 10.959828015006533,
"eval_accuracy": 0.7290244473492803,
"eval_loss": 4.641484260559082,
"eval_runtime": 283.4467,
"eval_samples_per_second": 425.851,
"eval_steps_per_second": 4.438,
"step": 260000
},
{
"epoch": 10.980904607343085,
"grad_norm": 1.2347427606582642,
"learning_rate": 5.703811688033273e-05,
"loss": 4.8905,
"step": 260500
},
{
"epoch": 11.001981199679635,
"grad_norm": 1.2407792806625366,
"learning_rate": 5.695236461861681e-05,
"loss": 4.8907,
"step": 261000
},
{
"epoch": 11.023057792016187,
"grad_norm": 1.2239134311676025,
"learning_rate": 5.686661235690092e-05,
"loss": 4.8845,
"step": 261500
},
{
"epoch": 11.044134384352738,
"grad_norm": 1.2286486625671387,
"learning_rate": 5.678086009518502e-05,
"loss": 4.8906,
"step": 262000
},
{
"epoch": 11.065210976689288,
"grad_norm": 1.2519724369049072,
"learning_rate": 5.669510783346911e-05,
"loss": 4.8929,
"step": 262500
},
{
"epoch": 11.08628756902584,
"grad_norm": 1.2288354635238647,
"learning_rate": 5.6609355571753207e-05,
"loss": 4.886,
"step": 263000
},
{
"epoch": 11.10736416136239,
"grad_norm": 1.2799263000488281,
"learning_rate": 5.652360331003731e-05,
"loss": 4.8889,
"step": 263500
},
{
"epoch": 11.128440753698943,
"grad_norm": 1.2295010089874268,
"learning_rate": 5.6437851048321396e-05,
"loss": 4.8851,
"step": 264000
},
{
"epoch": 11.149517346035493,
"grad_norm": 1.250861406326294,
"learning_rate": 5.63520987866055e-05,
"loss": 4.8895,
"step": 264500
},
{
"epoch": 11.170593938372043,
"grad_norm": 1.1974949836730957,
"learning_rate": 5.62663465248896e-05,
"loss": 4.8856,
"step": 265000
},
{
"epoch": 11.191670530708596,
"grad_norm": 1.2446671724319458,
"learning_rate": 5.618059426317369e-05,
"loss": 4.8838,
"step": 265500
},
{
"epoch": 11.212747123045146,
"grad_norm": 1.2154011726379395,
"learning_rate": 5.609484200145779e-05,
"loss": 4.89,
"step": 266000
},
{
"epoch": 11.233823715381696,
"grad_norm": 1.2143205404281616,
"learning_rate": 5.6009089739741895e-05,
"loss": 4.8885,
"step": 266500
},
{
"epoch": 11.254900307718248,
"grad_norm": 1.251407265663147,
"learning_rate": 5.5923337478025986e-05,
"loss": 4.8844,
"step": 267000
},
{
"epoch": 11.275976900054799,
"grad_norm": 1.2573626041412354,
"learning_rate": 5.5837585216310085e-05,
"loss": 4.8879,
"step": 267500
},
{
"epoch": 11.29705349239135,
"grad_norm": 1.2452291250228882,
"learning_rate": 5.575183295459418e-05,
"loss": 4.8831,
"step": 268000
},
{
"epoch": 11.318130084727901,
"grad_norm": 1.22361421585083,
"learning_rate": 5.5666080692878274e-05,
"loss": 4.8852,
"step": 268500
},
{
"epoch": 11.339206677064452,
"grad_norm": 1.1839536428451538,
"learning_rate": 5.558032843116238e-05,
"loss": 4.8857,
"step": 269000
},
{
"epoch": 11.360283269401004,
"grad_norm": 1.2024681568145752,
"learning_rate": 5.549457616944648e-05,
"loss": 4.8849,
"step": 269500
},
{
"epoch": 11.381359861737554,
"grad_norm": 1.2301065921783447,
"learning_rate": 5.540882390773057e-05,
"loss": 4.8805,
"step": 270000
},
{
"epoch": 11.381359861737554,
"eval_accuracy": 0.7301433062447764,
"eval_loss": 4.634106636047363,
"eval_runtime": 265.2265,
"eval_samples_per_second": 455.105,
"eval_steps_per_second": 4.743,
"step": 270000
},
{
"epoch": 11.402436454074106,
"grad_norm": 1.215085744857788,
"learning_rate": 5.532307164601467e-05,
"loss": 4.8839,
"step": 270500
},
{
"epoch": 11.423513046410656,
"grad_norm": 1.2867430448532104,
"learning_rate": 5.5237319384298766e-05,
"loss": 4.8812,
"step": 271000
},
{
"epoch": 11.444589638747207,
"grad_norm": 1.2057380676269531,
"learning_rate": 5.515156712258286e-05,
"loss": 4.8791,
"step": 271500
},
{
"epoch": 11.465666231083759,
"grad_norm": 1.264712929725647,
"learning_rate": 5.506581486086696e-05,
"loss": 4.8812,
"step": 272000
},
{
"epoch": 11.48674282342031,
"grad_norm": 1.2373634576797485,
"learning_rate": 5.498006259915105e-05,
"loss": 4.88,
"step": 272500
},
{
"epoch": 11.50781941575686,
"grad_norm": 1.200764536857605,
"learning_rate": 5.489431033743515e-05,
"loss": 4.8804,
"step": 273000
},
{
"epoch": 11.528896008093412,
"grad_norm": 1.2583191394805908,
"learning_rate": 5.480855807571925e-05,
"loss": 4.8821,
"step": 273500
},
{
"epoch": 11.549972600429962,
"grad_norm": 1.2284276485443115,
"learning_rate": 5.472280581400334e-05,
"loss": 4.8786,
"step": 274000
},
{
"epoch": 11.571049192766514,
"grad_norm": 1.2257758378982544,
"learning_rate": 5.463705355228744e-05,
"loss": 4.8792,
"step": 274500
},
{
"epoch": 11.592125785103065,
"grad_norm": 1.224541187286377,
"learning_rate": 5.4551301290571546e-05,
"loss": 4.8822,
"step": 275000
},
{
"epoch": 11.613202377439615,
"grad_norm": 1.2449454069137573,
"learning_rate": 5.446554902885563e-05,
"loss": 4.8791,
"step": 275500
},
{
"epoch": 11.634278969776167,
"grad_norm": 1.2074803113937378,
"learning_rate": 5.4379796767139736e-05,
"loss": 4.8756,
"step": 276000
},
{
"epoch": 11.655355562112717,
"grad_norm": 1.2630691528320312,
"learning_rate": 5.4294044505423834e-05,
"loss": 4.8755,
"step": 276500
},
{
"epoch": 11.67643215444927,
"grad_norm": 1.2212343215942383,
"learning_rate": 5.4208292243707925e-05,
"loss": 4.877,
"step": 277000
},
{
"epoch": 11.69750874678582,
"grad_norm": 1.2128989696502686,
"learning_rate": 5.4122539981992024e-05,
"loss": 4.8763,
"step": 277500
},
{
"epoch": 11.71858533912237,
"grad_norm": 1.2095223665237427,
"learning_rate": 5.403678772027613e-05,
"loss": 4.8714,
"step": 278000
},
{
"epoch": 11.739661931458922,
"grad_norm": 1.2840549945831299,
"learning_rate": 5.395103545856022e-05,
"loss": 4.8761,
"step": 278500
},
{
"epoch": 11.760738523795473,
"grad_norm": 1.1996203660964966,
"learning_rate": 5.386528319684432e-05,
"loss": 4.8722,
"step": 279000
},
{
"epoch": 11.781815116132023,
"grad_norm": 1.210436463356018,
"learning_rate": 5.377953093512842e-05,
"loss": 4.8706,
"step": 279500
},
{
"epoch": 11.802891708468575,
"grad_norm": 1.2381254434585571,
"learning_rate": 5.369377867341251e-05,
"loss": 4.8775,
"step": 280000
},
{
"epoch": 11.802891708468575,
"eval_accuracy": 0.7317542242673482,
"eval_loss": 4.623093128204346,
"eval_runtime": 264.4824,
"eval_samples_per_second": 456.386,
"eval_steps_per_second": 4.756,
"step": 280000
},
{
"epoch": 11.823968300805126,
"grad_norm": 1.281639575958252,
"learning_rate": 5.3608026411696614e-05,
"loss": 4.8785,
"step": 280500
},
{
"epoch": 11.845044893141678,
"grad_norm": 1.2299939393997192,
"learning_rate": 5.352227414998071e-05,
"loss": 4.8678,
"step": 281000
},
{
"epoch": 11.866121485478228,
"grad_norm": 1.2891165018081665,
"learning_rate": 5.34365218882648e-05,
"loss": 4.8708,
"step": 281500
},
{
"epoch": 11.887198077814778,
"grad_norm": 1.2055779695510864,
"learning_rate": 5.33507696265489e-05,
"loss": 4.8656,
"step": 282000
},
{
"epoch": 11.90827467015133,
"grad_norm": 1.234351634979248,
"learning_rate": 5.3265017364833e-05,
"loss": 4.8756,
"step": 282500
},
{
"epoch": 11.92935126248788,
"grad_norm": 1.2707619667053223,
"learning_rate": 5.317926510311709e-05,
"loss": 4.8708,
"step": 283000
},
{
"epoch": 11.950427854824433,
"grad_norm": 1.2452911138534546,
"learning_rate": 5.3093512841401197e-05,
"loss": 4.8696,
"step": 283500
},
{
"epoch": 11.971504447160983,
"grad_norm": 1.232471227645874,
"learning_rate": 5.3007760579685295e-05,
"loss": 4.8709,
"step": 284000
},
{
"epoch": 11.992581039497534,
"grad_norm": 1.2290921211242676,
"learning_rate": 5.2922008317969386e-05,
"loss": 4.8744,
"step": 284500
},
{
"epoch": 12.013657631834086,
"grad_norm": 1.2471346855163574,
"learning_rate": 5.2836256056253485e-05,
"loss": 4.868,
"step": 285000
},
{
"epoch": 12.034734224170636,
"grad_norm": 1.2504161596298218,
"learning_rate": 5.275050379453759e-05,
"loss": 4.8681,
"step": 285500
},
{
"epoch": 12.055810816507186,
"grad_norm": 1.177639365196228,
"learning_rate": 5.2664751532821675e-05,
"loss": 4.8658,
"step": 286000
},
{
"epoch": 12.076887408843739,
"grad_norm": 1.2195996046066284,
"learning_rate": 5.257899927110578e-05,
"loss": 4.8647,
"step": 286500
},
{
"epoch": 12.097964001180289,
"grad_norm": 1.2204630374908447,
"learning_rate": 5.249324700938988e-05,
"loss": 4.8623,
"step": 287000
},
{
"epoch": 12.119040593516841,
"grad_norm": 1.247004747390747,
"learning_rate": 5.240749474767397e-05,
"loss": 4.8625,
"step": 287500
},
{
"epoch": 12.140117185853391,
"grad_norm": 1.2698380947113037,
"learning_rate": 5.232174248595807e-05,
"loss": 4.8662,
"step": 288000
},
{
"epoch": 12.161193778189942,
"grad_norm": 1.2238640785217285,
"learning_rate": 5.223599022424217e-05,
"loss": 4.8662,
"step": 288500
},
{
"epoch": 12.182270370526494,
"grad_norm": 1.2402151823043823,
"learning_rate": 5.215023796252626e-05,
"loss": 4.8699,
"step": 289000
},
{
"epoch": 12.203346962863044,
"grad_norm": 1.276735782623291,
"learning_rate": 5.206448570081036e-05,
"loss": 4.8611,
"step": 289500
},
{
"epoch": 12.224423555199595,
"grad_norm": 1.2938944101333618,
"learning_rate": 5.197873343909446e-05,
"loss": 4.86,
"step": 290000
},
{
"epoch": 12.224423555199595,
"eval_accuracy": 0.7329991822619654,
"eval_loss": 4.616809844970703,
"eval_runtime": 277.3812,
"eval_samples_per_second": 435.163,
"eval_steps_per_second": 4.535,
"step": 290000
},
{
"epoch": 12.245500147536147,
"grad_norm": 1.269185185432434,
"learning_rate": 5.189298117737855e-05,
"loss": 4.8619,
"step": 290500
},
{
"epoch": 12.266576739872697,
"grad_norm": 1.263180136680603,
"learning_rate": 5.180722891566265e-05,
"loss": 4.8608,
"step": 291000
},
{
"epoch": 12.28765333220925,
"grad_norm": 1.2546781301498413,
"learning_rate": 5.1721476653946756e-05,
"loss": 4.8569,
"step": 291500
},
{
"epoch": 12.3087299245458,
"grad_norm": 1.2648341655731201,
"learning_rate": 5.163572439223085e-05,
"loss": 4.8609,
"step": 292000
},
{
"epoch": 12.32980651688235,
"grad_norm": 1.2304905652999878,
"learning_rate": 5.1549972130514946e-05,
"loss": 4.8618,
"step": 292500
},
{
"epoch": 12.350883109218902,
"grad_norm": 1.2329498529434204,
"learning_rate": 5.1464219868799044e-05,
"loss": 4.861,
"step": 293000
},
{
"epoch": 12.371959701555452,
"grad_norm": 1.2604204416275024,
"learning_rate": 5.1378467607083136e-05,
"loss": 4.8592,
"step": 293500
},
{
"epoch": 12.393036293892003,
"grad_norm": 1.2230466604232788,
"learning_rate": 5.1292715345367234e-05,
"loss": 4.8604,
"step": 294000
},
{
"epoch": 12.414112886228555,
"grad_norm": 1.261117696762085,
"learning_rate": 5.120696308365134e-05,
"loss": 4.8568,
"step": 294500
},
{
"epoch": 12.435189478565105,
"grad_norm": 1.352609634399414,
"learning_rate": 5.112121082193543e-05,
"loss": 4.86,
"step": 295000
},
{
"epoch": 12.456266070901657,
"grad_norm": 1.2068434953689575,
"learning_rate": 5.103545856021953e-05,
"loss": 4.8585,
"step": 295500
},
{
"epoch": 12.477342663238208,
"grad_norm": 1.2897241115570068,
"learning_rate": 5.094970629850363e-05,
"loss": 4.8594,
"step": 296000
},
{
"epoch": 12.498419255574758,
"grad_norm": 1.22060227394104,
"learning_rate": 5.086395403678772e-05,
"loss": 4.8581,
"step": 296500
},
{
"epoch": 12.51949584791131,
"grad_norm": 1.2450950145721436,
"learning_rate": 5.0778201775071824e-05,
"loss": 4.8634,
"step": 297000
},
{
"epoch": 12.54057244024786,
"grad_norm": 1.2779291868209839,
"learning_rate": 5.069244951335592e-05,
"loss": 4.8604,
"step": 297500
},
{
"epoch": 12.561649032584413,
"grad_norm": 1.2685707807540894,
"learning_rate": 5.0606697251640014e-05,
"loss": 4.8548,
"step": 298000
},
{
"epoch": 12.582725624920963,
"grad_norm": 1.249001145362854,
"learning_rate": 5.052094498992411e-05,
"loss": 4.8601,
"step": 298500
},
{
"epoch": 12.603802217257513,
"grad_norm": 1.2661477327346802,
"learning_rate": 5.043519272820822e-05,
"loss": 4.8584,
"step": 299000
},
{
"epoch": 12.624878809594065,
"grad_norm": 1.2500452995300293,
"learning_rate": 5.03494404664923e-05,
"loss": 4.8525,
"step": 299500
},
{
"epoch": 12.645955401930616,
"grad_norm": 1.3004056215286255,
"learning_rate": 5.026368820477641e-05,
"loss": 4.8539,
"step": 300000
},
{
"epoch": 12.645955401930616,
"eval_accuracy": 0.734579519436118,
"eval_loss": 4.606636047363281,
"eval_runtime": 282.2835,
"eval_samples_per_second": 427.606,
"eval_steps_per_second": 4.457,
"step": 300000
},
{
"epoch": 12.667031994267166,
"grad_norm": 1.2377026081085205,
"learning_rate": 5.0177935943060505e-05,
"loss": 4.8519,
"step": 300500
},
{
"epoch": 12.688108586603718,
"grad_norm": 1.2364566326141357,
"learning_rate": 5.00921836813446e-05,
"loss": 4.8577,
"step": 301000
},
{
"epoch": 12.709185178940269,
"grad_norm": 1.2762845754623413,
"learning_rate": 5.0006431419628695e-05,
"loss": 4.8553,
"step": 301500
},
{
"epoch": 12.73026177127682,
"grad_norm": 1.2642459869384766,
"learning_rate": 4.992067915791279e-05,
"loss": 4.855,
"step": 302000
},
{
"epoch": 12.751338363613371,
"grad_norm": 1.273370623588562,
"learning_rate": 4.9834926896196885e-05,
"loss": 4.8485,
"step": 302500
},
{
"epoch": 12.772414955949921,
"grad_norm": 1.3058017492294312,
"learning_rate": 4.974917463448099e-05,
"loss": 4.8552,
"step": 303000
},
{
"epoch": 12.793491548286474,
"grad_norm": 1.2248115539550781,
"learning_rate": 4.966342237276508e-05,
"loss": 4.8556,
"step": 303500
},
{
"epoch": 12.814568140623024,
"grad_norm": 1.2276921272277832,
"learning_rate": 4.957767011104918e-05,
"loss": 4.8534,
"step": 304000
},
{
"epoch": 12.835644732959576,
"grad_norm": 1.2708356380462646,
"learning_rate": 4.949191784933328e-05,
"loss": 4.854,
"step": 304500
},
{
"epoch": 12.856721325296126,
"grad_norm": 1.2179415225982666,
"learning_rate": 4.9406165587617376e-05,
"loss": 4.8513,
"step": 305000
},
{
"epoch": 12.877797917632677,
"grad_norm": 1.2671788930892944,
"learning_rate": 4.932041332590147e-05,
"loss": 4.8539,
"step": 305500
},
{
"epoch": 12.898874509969229,
"grad_norm": 1.2464276552200317,
"learning_rate": 4.923466106418557e-05,
"loss": 4.8498,
"step": 306000
},
{
"epoch": 12.91995110230578,
"grad_norm": 1.2503352165222168,
"learning_rate": 4.9148908802469665e-05,
"loss": 4.8485,
"step": 306500
},
{
"epoch": 12.94102769464233,
"grad_norm": 1.205830693244934,
"learning_rate": 4.906315654075376e-05,
"loss": 4.8503,
"step": 307000
},
{
"epoch": 12.962104286978882,
"grad_norm": 1.2623730897903442,
"learning_rate": 4.897740427903786e-05,
"loss": 4.8529,
"step": 307500
},
{
"epoch": 12.983180879315432,
"grad_norm": 1.2360824346542358,
"learning_rate": 4.889165201732196e-05,
"loss": 4.8558,
"step": 308000
},
{
"epoch": 13.004257471651984,
"grad_norm": 1.2536768913269043,
"learning_rate": 4.880589975560606e-05,
"loss": 4.8479,
"step": 308500
},
{
"epoch": 13.025334063988534,
"grad_norm": 1.2707194089889526,
"learning_rate": 4.8720147493890156e-05,
"loss": 4.8404,
"step": 309000
},
{
"epoch": 13.046410656325085,
"grad_norm": 1.2145887613296509,
"learning_rate": 4.8634395232174254e-05,
"loss": 4.8467,
"step": 309500
},
{
"epoch": 13.067487248661637,
"grad_norm": 1.1987941265106201,
"learning_rate": 4.8548642970458346e-05,
"loss": 4.8416,
"step": 310000
},
{
"epoch": 13.067487248661637,
"eval_accuracy": 0.7359324219672384,
"eval_loss": 4.598988056182861,
"eval_runtime": 264.3878,
"eval_samples_per_second": 456.549,
"eval_steps_per_second": 4.758,
"step": 310000
},
{
"epoch": 13.088563840998187,
"grad_norm": 1.250051498413086,
"learning_rate": 4.846289070874245e-05,
"loss": 4.8436,
"step": 310500
},
{
"epoch": 13.109640433334738,
"grad_norm": 1.227810263633728,
"learning_rate": 4.837713844702654e-05,
"loss": 4.8433,
"step": 311000
},
{
"epoch": 13.13071702567129,
"grad_norm": 1.2776970863342285,
"learning_rate": 4.829138618531064e-05,
"loss": 4.8407,
"step": 311500
},
{
"epoch": 13.15179361800784,
"grad_norm": 1.2327202558517456,
"learning_rate": 4.820563392359474e-05,
"loss": 4.8448,
"step": 312000
},
{
"epoch": 13.172870210344392,
"grad_norm": 1.2453476190567017,
"learning_rate": 4.811988166187884e-05,
"loss": 4.843,
"step": 312500
},
{
"epoch": 13.193946802680943,
"grad_norm": 1.24977707862854,
"learning_rate": 4.803412940016293e-05,
"loss": 4.8408,
"step": 313000
},
{
"epoch": 13.215023395017493,
"grad_norm": 1.2432777881622314,
"learning_rate": 4.7948377138447034e-05,
"loss": 4.8428,
"step": 313500
},
{
"epoch": 13.236099987354045,
"grad_norm": 1.241550326347351,
"learning_rate": 4.7862624876731126e-05,
"loss": 4.8415,
"step": 314000
},
{
"epoch": 13.257176579690595,
"grad_norm": 1.241142749786377,
"learning_rate": 4.7776872615015224e-05,
"loss": 4.84,
"step": 314500
},
{
"epoch": 13.278253172027147,
"grad_norm": 1.2663397789001465,
"learning_rate": 4.7691120353299315e-05,
"loss": 4.8361,
"step": 315000
},
{
"epoch": 13.299329764363698,
"grad_norm": 1.2450600862503052,
"learning_rate": 4.760536809158342e-05,
"loss": 4.8447,
"step": 315500
},
{
"epoch": 13.320406356700248,
"grad_norm": 1.2039545774459839,
"learning_rate": 4.751961582986751e-05,
"loss": 4.843,
"step": 316000
},
{
"epoch": 13.3414829490368,
"grad_norm": 1.2520126104354858,
"learning_rate": 4.743386356815161e-05,
"loss": 4.8431,
"step": 316500
},
{
"epoch": 13.36255954137335,
"grad_norm": 1.2289433479309082,
"learning_rate": 4.734811130643571e-05,
"loss": 4.8403,
"step": 317000
},
{
"epoch": 13.383636133709901,
"grad_norm": 1.2269790172576904,
"learning_rate": 4.726235904471981e-05,
"loss": 4.8388,
"step": 317500
},
{
"epoch": 13.404712726046453,
"grad_norm": 1.2989997863769531,
"learning_rate": 4.71766067830039e-05,
"loss": 4.8398,
"step": 318000
},
{
"epoch": 13.425789318383003,
"grad_norm": 1.240084171295166,
"learning_rate": 4.7090854521288004e-05,
"loss": 4.8408,
"step": 318500
},
{
"epoch": 13.446865910719556,
"grad_norm": 1.2415014505386353,
"learning_rate": 4.7005102259572095e-05,
"loss": 4.8397,
"step": 319000
},
{
"epoch": 13.467942503056106,
"grad_norm": 1.2695077657699585,
"learning_rate": 4.6919349997856193e-05,
"loss": 4.8412,
"step": 319500
},
{
"epoch": 13.489019095392656,
"grad_norm": 1.2876310348510742,
"learning_rate": 4.683359773614029e-05,
"loss": 4.8371,
"step": 320000
},
{
"epoch": 13.489019095392656,
"eval_accuracy": 0.7366342159961374,
"eval_loss": 4.593217849731445,
"eval_runtime": 261.8021,
"eval_samples_per_second": 461.058,
"eval_steps_per_second": 4.805,
"step": 320000
},
{
"epoch": 13.510095687729208,
"grad_norm": 1.292291522026062,
"learning_rate": 4.674784547442439e-05,
"loss": 4.8427,
"step": 320500
},
{
"epoch": 13.531172280065759,
"grad_norm": 1.248621940612793,
"learning_rate": 4.666209321270849e-05,
"loss": 4.8401,
"step": 321000
},
{
"epoch": 13.55224887240231,
"grad_norm": 1.2627707719802856,
"learning_rate": 4.657634095099259e-05,
"loss": 4.838,
"step": 321500
},
{
"epoch": 13.573325464738861,
"grad_norm": 1.2634505033493042,
"learning_rate": 4.6490588689276685e-05,
"loss": 4.8382,
"step": 322000
},
{
"epoch": 13.594402057075412,
"grad_norm": 1.248149037361145,
"learning_rate": 4.6404836427560777e-05,
"loss": 4.8389,
"step": 322500
},
{
"epoch": 13.615478649411964,
"grad_norm": 1.2694674730300903,
"learning_rate": 4.6319084165844875e-05,
"loss": 4.8338,
"step": 323000
},
{
"epoch": 13.636555241748514,
"grad_norm": 1.2487857341766357,
"learning_rate": 4.623333190412897e-05,
"loss": 4.8389,
"step": 323500
},
{
"epoch": 13.657631834085064,
"grad_norm": 1.2810231447219849,
"learning_rate": 4.614757964241307e-05,
"loss": 4.8333,
"step": 324000
},
{
"epoch": 13.678708426421617,
"grad_norm": 1.2491830587387085,
"learning_rate": 4.606182738069717e-05,
"loss": 4.8362,
"step": 324500
},
{
"epoch": 13.699785018758167,
"grad_norm": 1.273911476135254,
"learning_rate": 4.597607511898127e-05,
"loss": 4.8356,
"step": 325000
},
{
"epoch": 13.720861611094719,
"grad_norm": 1.2571637630462646,
"learning_rate": 4.589032285726536e-05,
"loss": 4.8352,
"step": 325500
},
{
"epoch": 13.74193820343127,
"grad_norm": 1.27104914188385,
"learning_rate": 4.5804570595549465e-05,
"loss": 4.8349,
"step": 326000
},
{
"epoch": 13.76301479576782,
"grad_norm": 1.2311949729919434,
"learning_rate": 4.5718818333833556e-05,
"loss": 4.8376,
"step": 326500
},
{
"epoch": 13.784091388104372,
"grad_norm": 1.2964130640029907,
"learning_rate": 4.5633066072117655e-05,
"loss": 4.8376,
"step": 327000
},
{
"epoch": 13.805167980440922,
"grad_norm": 1.2461034059524536,
"learning_rate": 4.554731381040175e-05,
"loss": 4.8309,
"step": 327500
},
{
"epoch": 13.826244572777473,
"grad_norm": 1.2934880256652832,
"learning_rate": 4.546156154868585e-05,
"loss": 4.8343,
"step": 328000
},
{
"epoch": 13.847321165114025,
"grad_norm": 1.2866716384887695,
"learning_rate": 4.537580928696994e-05,
"loss": 4.8331,
"step": 328500
},
{
"epoch": 13.868397757450575,
"grad_norm": 1.2519258260726929,
"learning_rate": 4.529005702525405e-05,
"loss": 4.8334,
"step": 329000
},
{
"epoch": 13.889474349787127,
"grad_norm": 1.3299874067306519,
"learning_rate": 4.520430476353814e-05,
"loss": 4.8321,
"step": 329500
},
{
"epoch": 13.910550942123677,
"grad_norm": 1.2699922323226929,
"learning_rate": 4.511855250182224e-05,
"loss": 4.8312,
"step": 330000
},
{
"epoch": 13.910550942123677,
"eval_accuracy": 0.7373057333678058,
"eval_loss": 4.587713241577148,
"eval_runtime": 260.299,
"eval_samples_per_second": 463.721,
"eval_steps_per_second": 4.833,
"step": 330000
},
{
"epoch": 13.931627534460228,
"grad_norm": 1.251166582107544,
"learning_rate": 4.5032800240106336e-05,
"loss": 4.8331,
"step": 330500
},
{
"epoch": 13.95270412679678,
"grad_norm": 1.259575605392456,
"learning_rate": 4.4947047978390434e-05,
"loss": 4.837,
"step": 331000
},
{
"epoch": 13.97378071913333,
"grad_norm": 1.2549121379852295,
"learning_rate": 4.4861295716674526e-05,
"loss": 4.8335,
"step": 331500
},
{
"epoch": 13.994857311469882,
"grad_norm": 1.2795922756195068,
"learning_rate": 4.4775543454958624e-05,
"loss": 4.8342,
"step": 332000
},
{
"epoch": 14.015933903806433,
"grad_norm": 1.2600176334381104,
"learning_rate": 4.468979119324272e-05,
"loss": 4.8255,
"step": 332500
},
{
"epoch": 14.037010496142983,
"grad_norm": 1.2377549409866333,
"learning_rate": 4.460403893152682e-05,
"loss": 4.8264,
"step": 333000
},
{
"epoch": 14.058087088479535,
"grad_norm": 1.2700035572052002,
"learning_rate": 4.451828666981092e-05,
"loss": 4.8259,
"step": 333500
},
{
"epoch": 14.079163680816086,
"grad_norm": 1.2455906867980957,
"learning_rate": 4.443253440809502e-05,
"loss": 4.8264,
"step": 334000
},
{
"epoch": 14.100240273152636,
"grad_norm": 1.2382512092590332,
"learning_rate": 4.4346782146379116e-05,
"loss": 4.8253,
"step": 334500
},
{
"epoch": 14.121316865489188,
"grad_norm": 1.2504768371582031,
"learning_rate": 4.426102988466321e-05,
"loss": 4.8239,
"step": 335000
},
{
"epoch": 14.142393457825738,
"grad_norm": 1.2309986352920532,
"learning_rate": 4.4175277622947305e-05,
"loss": 4.8193,
"step": 335500
},
{
"epoch": 14.16347005016229,
"grad_norm": 1.237213134765625,
"learning_rate": 4.4089525361231404e-05,
"loss": 4.8254,
"step": 336000
},
{
"epoch": 14.184546642498841,
"grad_norm": 1.2180109024047852,
"learning_rate": 4.40037730995155e-05,
"loss": 4.8202,
"step": 336500
},
{
"epoch": 14.205623234835391,
"grad_norm": 1.2724729776382446,
"learning_rate": 4.39180208377996e-05,
"loss": 4.8251,
"step": 337000
},
{
"epoch": 14.226699827171943,
"grad_norm": 1.273085117340088,
"learning_rate": 4.38322685760837e-05,
"loss": 4.8254,
"step": 337500
},
{
"epoch": 14.247776419508494,
"grad_norm": 1.2518419027328491,
"learning_rate": 4.374651631436779e-05,
"loss": 4.8238,
"step": 338000
},
{
"epoch": 14.268853011845044,
"grad_norm": 1.265973448753357,
"learning_rate": 4.3660764052651895e-05,
"loss": 4.8259,
"step": 338500
},
{
"epoch": 14.289929604181596,
"grad_norm": 1.249526858329773,
"learning_rate": 4.357501179093599e-05,
"loss": 4.8248,
"step": 339000
},
{
"epoch": 14.311006196518147,
"grad_norm": 1.2658559083938599,
"learning_rate": 4.3489259529220085e-05,
"loss": 4.8227,
"step": 339500
},
{
"epoch": 14.332082788854699,
"grad_norm": 1.3197706937789917,
"learning_rate": 4.3403507267504183e-05,
"loss": 4.8264,
"step": 340000
},
{
"epoch": 14.332082788854699,
"eval_accuracy": 0.7391617984553163,
"eval_loss": 4.578587532043457,
"eval_runtime": 261.1985,
"eval_samples_per_second": 462.124,
"eval_steps_per_second": 4.816,
"step": 340000
},
{
"epoch": 14.353159381191249,
"grad_norm": 1.3134719133377075,
"learning_rate": 4.331775500578828e-05,
"loss": 4.8229,
"step": 340500
},
{
"epoch": 14.3742359735278,
"grad_norm": 1.2750115394592285,
"learning_rate": 4.323200274407237e-05,
"loss": 4.8239,
"step": 341000
},
{
"epoch": 14.395312565864351,
"grad_norm": 1.3529267311096191,
"learning_rate": 4.314625048235648e-05,
"loss": 4.8197,
"step": 341500
},
{
"epoch": 14.416389158200902,
"grad_norm": 1.2445405721664429,
"learning_rate": 4.306049822064057e-05,
"loss": 4.8253,
"step": 342000
},
{
"epoch": 14.437465750537454,
"grad_norm": 1.2353616952896118,
"learning_rate": 4.297474595892467e-05,
"loss": 4.8242,
"step": 342500
},
{
"epoch": 14.458542342874004,
"grad_norm": 1.2449012994766235,
"learning_rate": 4.2888993697208767e-05,
"loss": 4.8199,
"step": 343000
},
{
"epoch": 14.479618935210555,
"grad_norm": 1.2301703691482544,
"learning_rate": 4.2803241435492865e-05,
"loss": 4.8217,
"step": 343500
},
{
"epoch": 14.500695527547107,
"grad_norm": 1.2420936822891235,
"learning_rate": 4.2717489173776956e-05,
"loss": 4.8211,
"step": 344000
},
{
"epoch": 14.521772119883657,
"grad_norm": 1.2967267036437988,
"learning_rate": 4.263173691206106e-05,
"loss": 4.8206,
"step": 344500
},
{
"epoch": 14.542848712220207,
"grad_norm": 1.3207632303237915,
"learning_rate": 4.254598465034515e-05,
"loss": 4.8212,
"step": 345000
},
{
"epoch": 14.56392530455676,
"grad_norm": 1.2333003282546997,
"learning_rate": 4.246023238862925e-05,
"loss": 4.8207,
"step": 345500
},
{
"epoch": 14.58500189689331,
"grad_norm": 1.4816551208496094,
"learning_rate": 4.237448012691335e-05,
"loss": 4.8191,
"step": 346000
},
{
"epoch": 14.606078489229862,
"grad_norm": 1.2691810131072998,
"learning_rate": 4.228872786519745e-05,
"loss": 4.8173,
"step": 346500
},
{
"epoch": 14.627155081566412,
"grad_norm": 1.2760276794433594,
"learning_rate": 4.220297560348154e-05,
"loss": 4.8207,
"step": 347000
},
{
"epoch": 14.648231673902963,
"grad_norm": 1.2917193174362183,
"learning_rate": 4.2117223341765645e-05,
"loss": 4.8166,
"step": 347500
},
{
"epoch": 14.669308266239515,
"grad_norm": 1.247707724571228,
"learning_rate": 4.2031471080049736e-05,
"loss": 4.8175,
"step": 348000
},
{
"epoch": 14.690384858576065,
"grad_norm": 1.2753363847732544,
"learning_rate": 4.1945718818333834e-05,
"loss": 4.8185,
"step": 348500
},
{
"epoch": 14.711461450912616,
"grad_norm": 1.3008183240890503,
"learning_rate": 4.185996655661793e-05,
"loss": 4.8199,
"step": 349000
},
{
"epoch": 14.732538043249168,
"grad_norm": 1.2664154767990112,
"learning_rate": 4.177421429490203e-05,
"loss": 4.8203,
"step": 349500
},
{
"epoch": 14.753614635585718,
"grad_norm": 1.2957167625427246,
"learning_rate": 4.168846203318613e-05,
"loss": 4.8157,
"step": 350000
},
{
"epoch": 14.753614635585718,
"eval_accuracy": 0.7398309923643398,
"eval_loss": 4.572203636169434,
"eval_runtime": 260.5009,
"eval_samples_per_second": 463.361,
"eval_steps_per_second": 4.829,
"step": 350000
},
{
"epoch": 14.77469122792227,
"grad_norm": 1.2447220087051392,
"learning_rate": 4.160270977147022e-05,
"loss": 4.8155,
"step": 350500
},
{
"epoch": 14.79576782025882,
"grad_norm": 1.258647084236145,
"learning_rate": 4.1516957509754326e-05,
"loss": 4.8195,
"step": 351000
},
{
"epoch": 14.81684441259537,
"grad_norm": 1.2524493932724,
"learning_rate": 4.143120524803842e-05,
"loss": 4.8134,
"step": 351500
},
{
"epoch": 14.837921004931923,
"grad_norm": 1.3170547485351562,
"learning_rate": 4.1345452986322516e-05,
"loss": 4.8157,
"step": 352000
},
{
"epoch": 14.858997597268473,
"grad_norm": 1.2807785272598267,
"learning_rate": 4.1259700724606614e-05,
"loss": 4.8156,
"step": 352500
},
{
"epoch": 14.880074189605025,
"grad_norm": 1.2734440565109253,
"learning_rate": 4.117394846289071e-05,
"loss": 4.8155,
"step": 353000
},
{
"epoch": 14.901150781941576,
"grad_norm": 1.269304633140564,
"learning_rate": 4.1088196201174804e-05,
"loss": 4.8171,
"step": 353500
},
{
"epoch": 14.922227374278126,
"grad_norm": 1.2463005781173706,
"learning_rate": 4.100244393945891e-05,
"loss": 4.8131,
"step": 354000
},
{
"epoch": 14.943303966614678,
"grad_norm": 1.2605857849121094,
"learning_rate": 4.0916691677743e-05,
"loss": 4.8193,
"step": 354500
},
{
"epoch": 14.964380558951229,
"grad_norm": 1.2442375421524048,
"learning_rate": 4.08309394160271e-05,
"loss": 4.816,
"step": 355000
},
{
"epoch": 14.985457151287779,
"grad_norm": 1.2686355113983154,
"learning_rate": 4.07451871543112e-05,
"loss": 4.8146,
"step": 355500
},
{
"epoch": 15.006533743624331,
"grad_norm": 1.2469780445098877,
"learning_rate": 4.0659434892595295e-05,
"loss": 4.8095,
"step": 356000
},
{
"epoch": 15.027610335960881,
"grad_norm": 1.2668726444244385,
"learning_rate": 4.057368263087939e-05,
"loss": 4.8056,
"step": 356500
},
{
"epoch": 15.048686928297434,
"grad_norm": 1.252569556236267,
"learning_rate": 4.048793036916349e-05,
"loss": 4.8079,
"step": 357000
},
{
"epoch": 15.069763520633984,
"grad_norm": 1.2381128072738647,
"learning_rate": 4.0402178107447584e-05,
"loss": 4.8112,
"step": 357500
},
{
"epoch": 15.090840112970534,
"grad_norm": 1.3249460458755493,
"learning_rate": 4.031642584573168e-05,
"loss": 4.8103,
"step": 358000
},
{
"epoch": 15.111916705307086,
"grad_norm": 1.3176980018615723,
"learning_rate": 4.023067358401578e-05,
"loss": 4.8065,
"step": 358500
},
{
"epoch": 15.132993297643637,
"grad_norm": 1.3111411333084106,
"learning_rate": 4.014492132229988e-05,
"loss": 4.8102,
"step": 359000
},
{
"epoch": 15.154069889980189,
"grad_norm": 1.3023666143417358,
"learning_rate": 4.005916906058397e-05,
"loss": 4.8076,
"step": 359500
},
{
"epoch": 15.17514648231674,
"grad_norm": 1.239010214805603,
"learning_rate": 3.9973416798868075e-05,
"loss": 4.8097,
"step": 360000
},
{
"epoch": 15.17514648231674,
"eval_accuracy": 0.7403816003361652,
"eval_loss": 4.568600654602051,
"eval_runtime": 261.3988,
"eval_samples_per_second": 461.77,
"eval_steps_per_second": 4.813,
"step": 360000
},
{
"epoch": 15.19622307465329,
"grad_norm": 1.28749680519104,
"learning_rate": 3.988766453715217e-05,
"loss": 4.8103,
"step": 360500
},
{
"epoch": 15.217299666989842,
"grad_norm": 1.2523281574249268,
"learning_rate": 3.9801912275436265e-05,
"loss": 4.8094,
"step": 361000
},
{
"epoch": 15.238376259326392,
"grad_norm": 1.2758135795593262,
"learning_rate": 3.971616001372036e-05,
"loss": 4.8096,
"step": 361500
},
{
"epoch": 15.259452851662942,
"grad_norm": 1.2920376062393188,
"learning_rate": 3.963040775200446e-05,
"loss": 4.807,
"step": 362000
},
{
"epoch": 15.280529443999495,
"grad_norm": 1.325407862663269,
"learning_rate": 3.954465549028856e-05,
"loss": 4.8049,
"step": 362500
},
{
"epoch": 15.301606036336045,
"grad_norm": 1.285467267036438,
"learning_rate": 3.945890322857266e-05,
"loss": 4.8083,
"step": 363000
},
{
"epoch": 15.322682628672597,
"grad_norm": 1.2237058877944946,
"learning_rate": 3.9373150966856756e-05,
"loss": 4.8045,
"step": 363500
},
{
"epoch": 15.343759221009147,
"grad_norm": 1.25303053855896,
"learning_rate": 3.928739870514085e-05,
"loss": 4.805,
"step": 364000
},
{
"epoch": 15.364835813345698,
"grad_norm": 1.2906306982040405,
"learning_rate": 3.920164644342495e-05,
"loss": 4.8043,
"step": 364500
},
{
"epoch": 15.38591240568225,
"grad_norm": 1.2776191234588623,
"learning_rate": 3.9115894181709045e-05,
"loss": 4.8044,
"step": 365000
},
{
"epoch": 15.4069889980188,
"grad_norm": 1.2737802267074585,
"learning_rate": 3.903014191999314e-05,
"loss": 4.8012,
"step": 365500
},
{
"epoch": 15.42806559035535,
"grad_norm": 1.3048441410064697,
"learning_rate": 3.894438965827724e-05,
"loss": 4.8082,
"step": 366000
},
{
"epoch": 15.449142182691903,
"grad_norm": 1.2341302633285522,
"learning_rate": 3.885863739656134e-05,
"loss": 4.8062,
"step": 366500
},
{
"epoch": 15.470218775028453,
"grad_norm": 1.267170786857605,
"learning_rate": 3.877288513484543e-05,
"loss": 4.803,
"step": 367000
},
{
"epoch": 15.491295367365005,
"grad_norm": 1.2790710926055908,
"learning_rate": 3.868713287312953e-05,
"loss": 4.8058,
"step": 367500
},
{
"epoch": 15.512371959701555,
"grad_norm": 1.3136919736862183,
"learning_rate": 3.860138061141363e-05,
"loss": 4.8043,
"step": 368000
},
{
"epoch": 15.533448552038106,
"grad_norm": 1.2835540771484375,
"learning_rate": 3.8515628349697726e-05,
"loss": 4.8065,
"step": 368500
},
{
"epoch": 15.554525144374658,
"grad_norm": 1.2605501413345337,
"learning_rate": 3.842987608798182e-05,
"loss": 4.803,
"step": 369000
},
{
"epoch": 15.575601736711208,
"grad_norm": 1.3119114637374878,
"learning_rate": 3.834412382626592e-05,
"loss": 4.8014,
"step": 369500
},
{
"epoch": 15.596678329047759,
"grad_norm": 1.2742840051651,
"learning_rate": 3.8258371564550014e-05,
"loss": 4.8045,
"step": 370000
},
{
"epoch": 15.596678329047759,
"eval_accuracy": 0.7416206164253277,
"eval_loss": 4.55956506729126,
"eval_runtime": 261.1858,
"eval_samples_per_second": 462.146,
"eval_steps_per_second": 4.816,
"step": 370000
},
{
"epoch": 15.61775492138431,
"grad_norm": 1.2642006874084473,
"learning_rate": 3.817261930283411e-05,
"loss": 4.802,
"step": 370500
},
{
"epoch": 15.638831513720861,
"grad_norm": 1.3366634845733643,
"learning_rate": 3.808686704111821e-05,
"loss": 4.8029,
"step": 371000
},
{
"epoch": 15.659908106057413,
"grad_norm": 1.282545566558838,
"learning_rate": 3.800111477940231e-05,
"loss": 4.8017,
"step": 371500
},
{
"epoch": 15.680984698393964,
"grad_norm": 1.3031513690948486,
"learning_rate": 3.79153625176864e-05,
"loss": 4.8026,
"step": 372000
},
{
"epoch": 15.702061290730514,
"grad_norm": 1.2576713562011719,
"learning_rate": 3.7829610255970506e-05,
"loss": 4.8049,
"step": 372500
},
{
"epoch": 15.723137883067066,
"grad_norm": 1.258209228515625,
"learning_rate": 3.77438579942546e-05,
"loss": 4.8002,
"step": 373000
},
{
"epoch": 15.744214475403616,
"grad_norm": 1.2866992950439453,
"learning_rate": 3.7658105732538696e-05,
"loss": 4.796,
"step": 373500
},
{
"epoch": 15.765291067740169,
"grad_norm": 1.2193955183029175,
"learning_rate": 3.7572353470822794e-05,
"loss": 4.8023,
"step": 374000
},
{
"epoch": 15.786367660076719,
"grad_norm": 1.2491436004638672,
"learning_rate": 3.748660120910689e-05,
"loss": 4.7974,
"step": 374500
},
{
"epoch": 15.80744425241327,
"grad_norm": 1.2935534715652466,
"learning_rate": 3.740084894739099e-05,
"loss": 4.7954,
"step": 375000
},
{
"epoch": 15.828520844749821,
"grad_norm": 1.270989179611206,
"learning_rate": 3.731509668567509e-05,
"loss": 4.8016,
"step": 375500
},
{
"epoch": 15.849597437086372,
"grad_norm": 1.3364858627319336,
"learning_rate": 3.722934442395919e-05,
"loss": 4.801,
"step": 376000
},
{
"epoch": 15.870674029422922,
"grad_norm": 1.2828434705734253,
"learning_rate": 3.714359216224328e-05,
"loss": 4.7994,
"step": 376500
},
{
"epoch": 15.891750621759474,
"grad_norm": 1.246233582496643,
"learning_rate": 3.705783990052738e-05,
"loss": 4.8029,
"step": 377000
},
{
"epoch": 15.912827214096025,
"grad_norm": 1.2815263271331787,
"learning_rate": 3.6972087638811475e-05,
"loss": 4.7973,
"step": 377500
},
{
"epoch": 15.933903806432577,
"grad_norm": 1.2439547777175903,
"learning_rate": 3.6886335377095574e-05,
"loss": 4.7992,
"step": 378000
},
{
"epoch": 15.954980398769127,
"grad_norm": 1.25736665725708,
"learning_rate": 3.680058311537967e-05,
"loss": 4.7985,
"step": 378500
},
{
"epoch": 15.976056991105677,
"grad_norm": 1.2472095489501953,
"learning_rate": 3.671483085366377e-05,
"loss": 4.7997,
"step": 379000
},
{
"epoch": 15.99713358344223,
"grad_norm": 1.283774495124817,
"learning_rate": 3.662907859194786e-05,
"loss": 4.7955,
"step": 379500
},
{
"epoch": 16.01821017577878,
"grad_norm": 1.3039425611495972,
"learning_rate": 3.654332633023197e-05,
"loss": 4.7951,
"step": 380000
},
{
"epoch": 16.01821017577878,
"eval_accuracy": 0.7427494921479412,
"eval_loss": 4.554550647735596,
"eval_runtime": 261.4964,
"eval_samples_per_second": 461.597,
"eval_steps_per_second": 4.811,
"step": 380000
},
{
"epoch": 16.039286768115332,
"grad_norm": 1.2317750453948975,
"learning_rate": 3.645757406851606e-05,
"loss": 4.7935,
"step": 380500
},
{
"epoch": 16.06036336045188,
"grad_norm": 1.3246266841888428,
"learning_rate": 3.6371821806800157e-05,
"loss": 4.7914,
"step": 381000
},
{
"epoch": 16.081439952788433,
"grad_norm": 1.245811104774475,
"learning_rate": 3.6286069545084255e-05,
"loss": 4.7958,
"step": 381500
},
{
"epoch": 16.102516545124985,
"grad_norm": 1.3091051578521729,
"learning_rate": 3.620031728336835e-05,
"loss": 4.7925,
"step": 382000
},
{
"epoch": 16.123593137461537,
"grad_norm": 1.2579543590545654,
"learning_rate": 3.6114565021652445e-05,
"loss": 4.7878,
"step": 382500
},
{
"epoch": 16.144669729798085,
"grad_norm": 1.2578227519989014,
"learning_rate": 3.602881275993655e-05,
"loss": 4.7901,
"step": 383000
},
{
"epoch": 16.165746322134638,
"grad_norm": 1.2853277921676636,
"learning_rate": 3.594306049822064e-05,
"loss": 4.7944,
"step": 383500
},
{
"epoch": 16.18682291447119,
"grad_norm": 1.301810622215271,
"learning_rate": 3.585730823650474e-05,
"loss": 4.793,
"step": 384000
},
{
"epoch": 16.20789950680774,
"grad_norm": 1.287253737449646,
"learning_rate": 3.577155597478883e-05,
"loss": 4.7925,
"step": 384500
},
{
"epoch": 16.22897609914429,
"grad_norm": 1.3141502141952515,
"learning_rate": 3.5685803713072936e-05,
"loss": 4.7869,
"step": 385000
},
{
"epoch": 16.250052691480843,
"grad_norm": 1.2628055810928345,
"learning_rate": 3.560005145135703e-05,
"loss": 4.7904,
"step": 385500
},
{
"epoch": 16.27112928381739,
"grad_norm": 1.3201873302459717,
"learning_rate": 3.5514299189641126e-05,
"loss": 4.7876,
"step": 386000
},
{
"epoch": 16.292205876153943,
"grad_norm": 1.2615625858306885,
"learning_rate": 3.5428546927925224e-05,
"loss": 4.794,
"step": 386500
},
{
"epoch": 16.313282468490495,
"grad_norm": 1.253743290901184,
"learning_rate": 3.534279466620932e-05,
"loss": 4.7898,
"step": 387000
},
{
"epoch": 16.334359060827044,
"grad_norm": 1.2864294052124023,
"learning_rate": 3.525704240449342e-05,
"loss": 4.789,
"step": 387500
},
{
"epoch": 16.355435653163596,
"grad_norm": 1.3105510473251343,
"learning_rate": 3.517129014277752e-05,
"loss": 4.788,
"step": 388000
},
{
"epoch": 16.376512245500148,
"grad_norm": 1.2848082780838013,
"learning_rate": 3.508553788106161e-05,
"loss": 4.7869,
"step": 388500
},
{
"epoch": 16.3975888378367,
"grad_norm": 1.2540411949157715,
"learning_rate": 3.499978561934571e-05,
"loss": 4.787,
"step": 389000
},
{
"epoch": 16.41866543017325,
"grad_norm": 1.2631422281265259,
"learning_rate": 3.491403335762981e-05,
"loss": 4.7894,
"step": 389500
},
{
"epoch": 16.4397420225098,
"grad_norm": 1.63542640209198,
"learning_rate": 3.4828281095913906e-05,
"loss": 4.7906,
"step": 390000
},
{
"epoch": 16.4397420225098,
"eval_accuracy": 0.7434117331705465,
"eval_loss": 4.548714637756348,
"eval_runtime": 261.6096,
"eval_samples_per_second": 461.397,
"eval_steps_per_second": 4.809,
"step": 390000
},
{
"epoch": 16.460818614846353,
"grad_norm": 1.2889297008514404,
"learning_rate": 3.4742528834198004e-05,
"loss": 4.7861,
"step": 390500
},
{
"epoch": 16.4818952071829,
"grad_norm": 1.2491283416748047,
"learning_rate": 3.46567765724821e-05,
"loss": 4.7902,
"step": 391000
},
{
"epoch": 16.502971799519454,
"grad_norm": 1.3024142980575562,
"learning_rate": 3.45710243107662e-05,
"loss": 4.7879,
"step": 391500
},
{
"epoch": 16.524048391856006,
"grad_norm": 1.287690281867981,
"learning_rate": 3.448527204905029e-05,
"loss": 4.7933,
"step": 392000
},
{
"epoch": 16.545124984192555,
"grad_norm": 1.2766319513320923,
"learning_rate": 3.43995197873344e-05,
"loss": 4.7848,
"step": 392500
},
{
"epoch": 16.566201576529107,
"grad_norm": 1.2980682849884033,
"learning_rate": 3.431376752561849e-05,
"loss": 4.7881,
"step": 393000
},
{
"epoch": 16.58727816886566,
"grad_norm": 1.245545744895935,
"learning_rate": 3.422801526390259e-05,
"loss": 4.79,
"step": 393500
},
{
"epoch": 16.608354761202207,
"grad_norm": 1.263808250427246,
"learning_rate": 3.4142263002186686e-05,
"loss": 4.788,
"step": 394000
},
{
"epoch": 16.62943135353876,
"grad_norm": 1.2918981313705444,
"learning_rate": 3.4056510740470784e-05,
"loss": 4.7921,
"step": 394500
},
{
"epoch": 16.65050794587531,
"grad_norm": 1.3072043657302856,
"learning_rate": 3.3970758478754875e-05,
"loss": 4.7876,
"step": 395000
},
{
"epoch": 16.67158453821186,
"grad_norm": 1.246109127998352,
"learning_rate": 3.388500621703898e-05,
"loss": 4.7872,
"step": 395500
},
{
"epoch": 16.692661130548412,
"grad_norm": 1.324275016784668,
"learning_rate": 3.379925395532307e-05,
"loss": 4.7881,
"step": 396000
},
{
"epoch": 16.713737722884964,
"grad_norm": 1.231432557106018,
"learning_rate": 3.371350169360717e-05,
"loss": 4.7856,
"step": 396500
},
{
"epoch": 16.734814315221517,
"grad_norm": 1.2830113172531128,
"learning_rate": 3.362774943189127e-05,
"loss": 4.7886,
"step": 397000
},
{
"epoch": 16.755890907558065,
"grad_norm": 1.240465521812439,
"learning_rate": 3.354199717017537e-05,
"loss": 4.7848,
"step": 397500
},
{
"epoch": 16.776967499894617,
"grad_norm": 1.3081951141357422,
"learning_rate": 3.345624490845946e-05,
"loss": 4.7855,
"step": 398000
},
{
"epoch": 16.79804409223117,
"grad_norm": 1.228482723236084,
"learning_rate": 3.3370492646743564e-05,
"loss": 4.7835,
"step": 398500
},
{
"epoch": 16.819120684567718,
"grad_norm": 1.2448921203613281,
"learning_rate": 3.3284740385027655e-05,
"loss": 4.7908,
"step": 399000
},
{
"epoch": 16.84019727690427,
"grad_norm": 1.3560943603515625,
"learning_rate": 3.319898812331175e-05,
"loss": 4.7813,
"step": 399500
},
{
"epoch": 16.861273869240822,
"grad_norm": 1.314419150352478,
"learning_rate": 3.311323586159585e-05,
"loss": 4.7858,
"step": 400000
},
{
"epoch": 16.861273869240822,
"eval_accuracy": 0.744149878754249,
"eval_loss": 4.545226573944092,
"eval_runtime": 261.8135,
"eval_samples_per_second": 461.038,
"eval_steps_per_second": 4.805,
"step": 400000
},
{
"epoch": 16.88235046157737,
"grad_norm": 1.2884814739227295,
"learning_rate": 3.302748359987995e-05,
"loss": 4.7816,
"step": 400500
},
{
"epoch": 16.903427053913923,
"grad_norm": 1.2599024772644043,
"learning_rate": 3.294173133816404e-05,
"loss": 4.7838,
"step": 401000
},
{
"epoch": 16.924503646250475,
"grad_norm": 3.098177671432495,
"learning_rate": 3.2855979076448147e-05,
"loss": 4.7881,
"step": 401500
},
{
"epoch": 16.945580238587024,
"grad_norm": 1.312930941581726,
"learning_rate": 3.277022681473224e-05,
"loss": 4.7852,
"step": 402000
},
{
"epoch": 16.966656830923576,
"grad_norm": 1.3211047649383545,
"learning_rate": 3.2684474553016336e-05,
"loss": 4.7841,
"step": 402500
},
{
"epoch": 16.987733423260128,
"grad_norm": 1.4022953510284424,
"learning_rate": 3.2598722291300435e-05,
"loss": 4.7855,
"step": 403000
},
{
"epoch": 17.00881001559668,
"grad_norm": 1.2829910516738892,
"learning_rate": 3.251297002958453e-05,
"loss": 4.7824,
"step": 403500
},
{
"epoch": 17.02988660793323,
"grad_norm": 1.2845124006271362,
"learning_rate": 3.242721776786863e-05,
"loss": 4.7824,
"step": 404000
},
{
"epoch": 17.05096320026978,
"grad_norm": 1.260939359664917,
"learning_rate": 3.234146550615272e-05,
"loss": 4.778,
"step": 404500
},
{
"epoch": 17.072039792606333,
"grad_norm": 1.3211950063705444,
"learning_rate": 3.225571324443683e-05,
"loss": 4.78,
"step": 405000
},
{
"epoch": 17.09311638494288,
"grad_norm": 1.3032972812652588,
"learning_rate": 3.216996098272092e-05,
"loss": 4.7809,
"step": 405500
},
{
"epoch": 17.114192977279433,
"grad_norm": 1.2794256210327148,
"learning_rate": 3.208420872100502e-05,
"loss": 4.7802,
"step": 406000
},
{
"epoch": 17.135269569615986,
"grad_norm": 1.2948325872421265,
"learning_rate": 3.1998456459289116e-05,
"loss": 4.7763,
"step": 406500
},
{
"epoch": 17.156346161952534,
"grad_norm": 1.2595492601394653,
"learning_rate": 3.1912704197573214e-05,
"loss": 4.7755,
"step": 407000
},
{
"epoch": 17.177422754289086,
"grad_norm": 1.2940860986709595,
"learning_rate": 3.1826951935857306e-05,
"loss": 4.7752,
"step": 407500
},
{
"epoch": 17.19849934662564,
"grad_norm": 1.2965643405914307,
"learning_rate": 3.174119967414141e-05,
"loss": 4.7796,
"step": 408000
},
{
"epoch": 17.219575938962187,
"grad_norm": 1.3052237033843994,
"learning_rate": 3.16554474124255e-05,
"loss": 4.7779,
"step": 408500
},
{
"epoch": 17.24065253129874,
"grad_norm": 1.2727681398391724,
"learning_rate": 3.15696951507096e-05,
"loss": 4.7804,
"step": 409000
},
{
"epoch": 17.26172912363529,
"grad_norm": 1.345372200012207,
"learning_rate": 3.14839428889937e-05,
"loss": 4.7777,
"step": 409500
},
{
"epoch": 17.282805715971843,
"grad_norm": 1.2977099418640137,
"learning_rate": 3.13981906272778e-05,
"loss": 4.7798,
"step": 410000
},
{
"epoch": 17.282805715971843,
"eval_accuracy": 0.7451750456515381,
"eval_loss": 4.53807258605957,
"eval_runtime": 261.22,
"eval_samples_per_second": 462.086,
"eval_steps_per_second": 4.816,
"step": 410000
},
{
"epoch": 17.303882308308392,
"grad_norm": 1.3410139083862305,
"learning_rate": 3.131243836556189e-05,
"loss": 4.7826,
"step": 410500
},
{
"epoch": 17.324958900644944,
"grad_norm": 1.3018629550933838,
"learning_rate": 3.1226686103845994e-05,
"loss": 4.7755,
"step": 411000
},
{
"epoch": 17.346035492981496,
"grad_norm": 1.2718100547790527,
"learning_rate": 3.1140933842130086e-05,
"loss": 4.775,
"step": 411500
},
{
"epoch": 17.367112085318045,
"grad_norm": 1.3698660135269165,
"learning_rate": 3.1055181580414184e-05,
"loss": 4.7783,
"step": 412000
},
{
"epoch": 17.388188677654597,
"grad_norm": 1.2649726867675781,
"learning_rate": 3.096942931869828e-05,
"loss": 4.7799,
"step": 412500
},
{
"epoch": 17.40926526999115,
"grad_norm": 11.488563537597656,
"learning_rate": 3.088367705698238e-05,
"loss": 4.7733,
"step": 413000
},
{
"epoch": 17.430341862327698,
"grad_norm": 1.2406359910964966,
"learning_rate": 3.079792479526647e-05,
"loss": 4.7801,
"step": 413500
},
{
"epoch": 17.45141845466425,
"grad_norm": 1.24382483959198,
"learning_rate": 3.071217253355058e-05,
"loss": 4.7747,
"step": 414000
},
{
"epoch": 17.472495047000802,
"grad_norm": 1.2766995429992676,
"learning_rate": 3.062642027183467e-05,
"loss": 4.7741,
"step": 414500
},
{
"epoch": 17.49357163933735,
"grad_norm": 1.3038281202316284,
"learning_rate": 3.054066801011877e-05,
"loss": 4.7758,
"step": 415000
},
{
"epoch": 17.514648231673903,
"grad_norm": 1.2977404594421387,
"learning_rate": 3.045491574840287e-05,
"loss": 4.7782,
"step": 415500
},
{
"epoch": 17.535724824010455,
"grad_norm": 1.3087376356124878,
"learning_rate": 3.0369163486686964e-05,
"loss": 4.7745,
"step": 416000
},
{
"epoch": 17.556801416347007,
"grad_norm": 1.307850956916809,
"learning_rate": 3.028341122497106e-05,
"loss": 4.7721,
"step": 416500
},
{
"epoch": 17.577878008683555,
"grad_norm": 1.3581358194351196,
"learning_rate": 3.019765896325516e-05,
"loss": 4.7781,
"step": 417000
},
{
"epoch": 17.598954601020107,
"grad_norm": 1.3390425443649292,
"learning_rate": 3.0111906701539255e-05,
"loss": 4.7756,
"step": 417500
},
{
"epoch": 17.62003119335666,
"grad_norm": 1.301466703414917,
"learning_rate": 3.002615443982335e-05,
"loss": 4.7728,
"step": 418000
},
{
"epoch": 17.641107785693208,
"grad_norm": 1.299738883972168,
"learning_rate": 2.9940402178107452e-05,
"loss": 4.7733,
"step": 418500
},
{
"epoch": 17.66218437802976,
"grad_norm": 1.3140435218811035,
"learning_rate": 2.9854649916391547e-05,
"loss": 4.7703,
"step": 419000
},
{
"epoch": 17.683260970366312,
"grad_norm": 1.3404839038848877,
"learning_rate": 2.976889765467564e-05,
"loss": 4.7766,
"step": 419500
},
{
"epoch": 17.70433756270286,
"grad_norm": 1.285805583000183,
"learning_rate": 2.9683145392959737e-05,
"loss": 4.7725,
"step": 420000
},
{
"epoch": 17.70433756270286,
"eval_accuracy": 0.7460531419088614,
"eval_loss": 4.532994270324707,
"eval_runtime": 261.155,
"eval_samples_per_second": 462.201,
"eval_steps_per_second": 4.817,
"step": 420000
},
{
"epoch": 17.725414155039413,
"grad_norm": 1.3289742469787598,
"learning_rate": 2.9597393131243838e-05,
"loss": 4.7724,
"step": 420500
},
{
"epoch": 17.746490747375965,
"grad_norm": 1.2588924169540405,
"learning_rate": 2.9511640869527933e-05,
"loss": 4.768,
"step": 421000
},
{
"epoch": 17.767567339712514,
"grad_norm": 1.2974965572357178,
"learning_rate": 2.942588860781203e-05,
"loss": 4.7684,
"step": 421500
},
{
"epoch": 17.788643932049066,
"grad_norm": 1.2711418867111206,
"learning_rate": 2.934013634609613e-05,
"loss": 4.7723,
"step": 422000
},
{
"epoch": 17.809720524385618,
"grad_norm": 1.2917659282684326,
"learning_rate": 2.9254384084380225e-05,
"loss": 4.7726,
"step": 422500
},
{
"epoch": 17.830797116722167,
"grad_norm": 18.45638656616211,
"learning_rate": 2.9168631822664323e-05,
"loss": 4.773,
"step": 423000
},
{
"epoch": 17.85187370905872,
"grad_norm": 1.2769232988357544,
"learning_rate": 2.908287956094842e-05,
"loss": 4.7738,
"step": 423500
},
{
"epoch": 17.87295030139527,
"grad_norm": 1.2542996406555176,
"learning_rate": 2.899712729923252e-05,
"loss": 4.7719,
"step": 424000
},
{
"epoch": 17.894026893731823,
"grad_norm": 1.2704521417617798,
"learning_rate": 2.8911375037516615e-05,
"loss": 4.7717,
"step": 424500
},
{
"epoch": 17.91510348606837,
"grad_norm": 1.271987795829773,
"learning_rate": 2.8825622775800716e-05,
"loss": 4.7725,
"step": 425000
},
{
"epoch": 17.936180078404924,
"grad_norm": 1.2958498001098633,
"learning_rate": 2.873987051408481e-05,
"loss": 4.7702,
"step": 425500
},
{
"epoch": 17.957256670741476,
"grad_norm": 1.3020325899124146,
"learning_rate": 2.8654118252368906e-05,
"loss": 4.7673,
"step": 426000
},
{
"epoch": 17.978333263078024,
"grad_norm": 1.3393250703811646,
"learning_rate": 2.8568365990653008e-05,
"loss": 4.7687,
"step": 426500
},
{
"epoch": 17.999409855414576,
"grad_norm": 1.27121102809906,
"learning_rate": 2.8482613728937103e-05,
"loss": 4.7703,
"step": 427000
},
{
"epoch": 18.02048644775113,
"grad_norm": 1.2881697416305542,
"learning_rate": 2.8396861467221198e-05,
"loss": 4.766,
"step": 427500
},
{
"epoch": 18.041563040087677,
"grad_norm": 1.3251922130584717,
"learning_rate": 2.83111092055053e-05,
"loss": 4.7653,
"step": 428000
},
{
"epoch": 18.06263963242423,
"grad_norm": 1.2815443277359009,
"learning_rate": 2.8225356943789394e-05,
"loss": 4.7655,
"step": 428500
},
{
"epoch": 18.08371622476078,
"grad_norm": 1.2982913255691528,
"learning_rate": 2.813960468207349e-05,
"loss": 4.7653,
"step": 429000
},
{
"epoch": 18.10479281709733,
"grad_norm": 1.2621245384216309,
"learning_rate": 2.805385242035759e-05,
"loss": 4.7633,
"step": 429500
},
{
"epoch": 18.125869409433882,
"grad_norm": 1.3368629217147827,
"learning_rate": 2.7968100158641686e-05,
"loss": 4.7628,
"step": 430000
},
{
"epoch": 18.125869409433882,
"eval_accuracy": 0.746643080460293,
"eval_loss": 4.530001163482666,
"eval_runtime": 262.0726,
"eval_samples_per_second": 460.582,
"eval_steps_per_second": 4.8,
"step": 430000
},
{
"epoch": 18.146946001770434,
"grad_norm": 1.3112956285476685,
"learning_rate": 2.788234789692578e-05,
"loss": 4.7632,
"step": 430500
},
{
"epoch": 18.168022594106986,
"grad_norm": 1.3063631057739258,
"learning_rate": 2.7796595635209882e-05,
"loss": 4.7648,
"step": 431000
},
{
"epoch": 18.189099186443535,
"grad_norm": 1.2684001922607422,
"learning_rate": 2.7710843373493977e-05,
"loss": 4.7667,
"step": 431500
},
{
"epoch": 18.210175778780087,
"grad_norm": 1.3133608102798462,
"learning_rate": 2.7625091111778072e-05,
"loss": 4.7676,
"step": 432000
},
{
"epoch": 18.23125237111664,
"grad_norm": 1.3357317447662354,
"learning_rate": 2.7539338850062174e-05,
"loss": 4.7642,
"step": 432500
},
{
"epoch": 18.252328963453188,
"grad_norm": 1.2716515064239502,
"learning_rate": 2.745358658834627e-05,
"loss": 4.7658,
"step": 433000
},
{
"epoch": 18.27340555578974,
"grad_norm": 1.302572250366211,
"learning_rate": 2.7367834326630364e-05,
"loss": 4.7618,
"step": 433500
},
{
"epoch": 18.294482148126292,
"grad_norm": 1.2975118160247803,
"learning_rate": 2.7282082064914465e-05,
"loss": 4.7628,
"step": 434000
},
{
"epoch": 18.31555874046284,
"grad_norm": 1.3110864162445068,
"learning_rate": 2.719632980319856e-05,
"loss": 4.761,
"step": 434500
},
{
"epoch": 18.336635332799393,
"grad_norm": 1.2881181240081787,
"learning_rate": 2.7110577541482655e-05,
"loss": 4.7624,
"step": 435000
},
{
"epoch": 18.357711925135945,
"grad_norm": 1.2837979793548584,
"learning_rate": 2.7024825279766757e-05,
"loss": 4.7655,
"step": 435500
},
{
"epoch": 18.378788517472493,
"grad_norm": 1.2968195676803589,
"learning_rate": 2.6939073018050852e-05,
"loss": 4.7639,
"step": 436000
},
{
"epoch": 18.399865109809046,
"grad_norm": 1.2873263359069824,
"learning_rate": 2.685332075633495e-05,
"loss": 4.7635,
"step": 436500
},
{
"epoch": 18.420941702145598,
"grad_norm": 1.2749794721603394,
"learning_rate": 2.6767568494619045e-05,
"loss": 4.7628,
"step": 437000
},
{
"epoch": 18.44201829448215,
"grad_norm": 1.287791132926941,
"learning_rate": 2.6681816232903143e-05,
"loss": 4.7602,
"step": 437500
},
{
"epoch": 18.4630948868187,
"grad_norm": 1.3127174377441406,
"learning_rate": 2.6596063971187242e-05,
"loss": 4.7581,
"step": 438000
},
{
"epoch": 18.48417147915525,
"grad_norm": 1.3641440868377686,
"learning_rate": 2.6510311709471337e-05,
"loss": 4.7603,
"step": 438500
},
{
"epoch": 18.505248071491803,
"grad_norm": 1.2924662828445435,
"learning_rate": 2.642455944775544e-05,
"loss": 4.7611,
"step": 439000
},
{
"epoch": 18.52632466382835,
"grad_norm": 1.2854405641555786,
"learning_rate": 2.6338807186039533e-05,
"loss": 4.7603,
"step": 439500
},
{
"epoch": 18.547401256164903,
"grad_norm": 1.3063039779663086,
"learning_rate": 2.6253054924323628e-05,
"loss": 4.7584,
"step": 440000
},
{
"epoch": 18.547401256164903,
"eval_accuracy": 0.7472322018415593,
"eval_loss": 4.525060176849365,
"eval_runtime": 264.7833,
"eval_samples_per_second": 455.867,
"eval_steps_per_second": 4.751,
"step": 440000
},
{
"epoch": 18.568477848501455,
"grad_norm": 1.312887191772461,
"learning_rate": 2.616730266260773e-05,
"loss": 4.7592,
"step": 440500
},
{
"epoch": 18.589554440838004,
"grad_norm": 1.2635859251022339,
"learning_rate": 2.6081550400891825e-05,
"loss": 4.7599,
"step": 441000
},
{
"epoch": 18.610631033174556,
"grad_norm": 1.297837734222412,
"learning_rate": 2.599579813917592e-05,
"loss": 4.7609,
"step": 441500
},
{
"epoch": 18.63170762551111,
"grad_norm": 1.3110982179641724,
"learning_rate": 2.591004587746002e-05,
"loss": 4.7605,
"step": 442000
},
{
"epoch": 18.652784217847657,
"grad_norm": 1.3118677139282227,
"learning_rate": 2.5824293615744116e-05,
"loss": 4.7592,
"step": 442500
},
{
"epoch": 18.67386081018421,
"grad_norm": 1.337641954421997,
"learning_rate": 2.573854135402821e-05,
"loss": 4.7606,
"step": 443000
},
{
"epoch": 18.69493740252076,
"grad_norm": 1.3019428253173828,
"learning_rate": 2.5652789092312313e-05,
"loss": 4.759,
"step": 443500
},
{
"epoch": 18.716013994857313,
"grad_norm": 1.3120992183685303,
"learning_rate": 2.5567036830596408e-05,
"loss": 4.7584,
"step": 444000
},
{
"epoch": 18.737090587193862,
"grad_norm": 1.3041455745697021,
"learning_rate": 2.5481284568880503e-05,
"loss": 4.7608,
"step": 444500
},
{
"epoch": 18.758167179530414,
"grad_norm": 1.2992240190505981,
"learning_rate": 2.5395532307164605e-05,
"loss": 4.7557,
"step": 445000
},
{
"epoch": 18.779243771866966,
"grad_norm": 1.3250569105148315,
"learning_rate": 2.53097800454487e-05,
"loss": 4.7617,
"step": 445500
},
{
"epoch": 18.800320364203515,
"grad_norm": 1.3373744487762451,
"learning_rate": 2.5224027783732794e-05,
"loss": 4.7608,
"step": 446000
},
{
"epoch": 18.821396956540067,
"grad_norm": 1.3832685947418213,
"learning_rate": 2.5138275522016896e-05,
"loss": 4.7533,
"step": 446500
},
{
"epoch": 18.84247354887662,
"grad_norm": 1.3216545581817627,
"learning_rate": 2.505252326030099e-05,
"loss": 4.7562,
"step": 447000
},
{
"epoch": 18.863550141213167,
"grad_norm": 1.3102269172668457,
"learning_rate": 2.496677099858509e-05,
"loss": 4.7576,
"step": 447500
},
{
"epoch": 18.88462673354972,
"grad_norm": 1.2707276344299316,
"learning_rate": 2.4881018736869184e-05,
"loss": 4.7601,
"step": 448000
},
{
"epoch": 18.90570332588627,
"grad_norm": 1.3279458284378052,
"learning_rate": 2.4795266475153283e-05,
"loss": 4.7546,
"step": 448500
},
{
"epoch": 18.92677991822282,
"grad_norm": 1.2959898710250854,
"learning_rate": 2.470951421343738e-05,
"loss": 4.7605,
"step": 449000
},
{
"epoch": 18.947856510559372,
"grad_norm": 1.272985816001892,
"learning_rate": 2.4623761951721476e-05,
"loss": 4.7558,
"step": 449500
},
{
"epoch": 18.968933102895924,
"grad_norm": 1.3414027690887451,
"learning_rate": 2.4538009690005574e-05,
"loss": 4.7573,
"step": 450000
},
{
"epoch": 18.968933102895924,
"eval_accuracy": 0.7482787955138648,
"eval_loss": 4.519334316253662,
"eval_runtime": 264.4617,
"eval_samples_per_second": 456.421,
"eval_steps_per_second": 4.757,
"step": 450000
},
{
"epoch": 18.990009695232473,
"grad_norm": 1.3086925745010376,
"learning_rate": 2.4452257428289672e-05,
"loss": 4.7535,
"step": 450500
},
{
"epoch": 19.011086287569025,
"grad_norm": 1.326367735862732,
"learning_rate": 2.436650516657377e-05,
"loss": 4.7521,
"step": 451000
},
{
"epoch": 19.032162879905577,
"grad_norm": 1.2998569011688232,
"learning_rate": 2.428075290485787e-05,
"loss": 4.7506,
"step": 451500
},
{
"epoch": 19.05323947224213,
"grad_norm": 1.334794044494629,
"learning_rate": 2.4195000643141964e-05,
"loss": 4.7534,
"step": 452000
},
{
"epoch": 19.074316064578678,
"grad_norm": 1.2814232110977173,
"learning_rate": 2.4109248381426062e-05,
"loss": 4.7558,
"step": 452500
},
{
"epoch": 19.09539265691523,
"grad_norm": 1.3276777267456055,
"learning_rate": 2.402349611971016e-05,
"loss": 4.7535,
"step": 453000
},
{
"epoch": 19.116469249251782,
"grad_norm": 1.2740899324417114,
"learning_rate": 2.3937743857994255e-05,
"loss": 4.7496,
"step": 453500
},
{
"epoch": 19.13754584158833,
"grad_norm": 1.3209656476974487,
"learning_rate": 2.3851991596278354e-05,
"loss": 4.7542,
"step": 454000
},
{
"epoch": 19.158622433924883,
"grad_norm": 1.3521721363067627,
"learning_rate": 2.3766239334562452e-05,
"loss": 4.7505,
"step": 454500
},
{
"epoch": 19.179699026261435,
"grad_norm": 1.3609946966171265,
"learning_rate": 2.3680487072846547e-05,
"loss": 4.7494,
"step": 455000
},
{
"epoch": 19.200775618597984,
"grad_norm": 1.3862378597259521,
"learning_rate": 2.3594734811130645e-05,
"loss": 4.7474,
"step": 455500
},
{
"epoch": 19.221852210934536,
"grad_norm": 1.3044368028640747,
"learning_rate": 2.3508982549414744e-05,
"loss": 4.7497,
"step": 456000
},
{
"epoch": 19.242928803271088,
"grad_norm": 1.3152740001678467,
"learning_rate": 2.342323028769884e-05,
"loss": 4.7515,
"step": 456500
},
{
"epoch": 19.264005395607636,
"grad_norm": 1.293208360671997,
"learning_rate": 2.3337478025982937e-05,
"loss": 4.7494,
"step": 457000
},
{
"epoch": 19.28508198794419,
"grad_norm": 1.327202558517456,
"learning_rate": 2.3251725764267035e-05,
"loss": 4.7511,
"step": 457500
},
{
"epoch": 19.30615858028074,
"grad_norm": 1.2733328342437744,
"learning_rate": 2.316597350255113e-05,
"loss": 4.7554,
"step": 458000
},
{
"epoch": 19.327235172617293,
"grad_norm": 1.3104603290557861,
"learning_rate": 2.308022124083523e-05,
"loss": 4.7506,
"step": 458500
},
{
"epoch": 19.34831176495384,
"grad_norm": 1.3377212285995483,
"learning_rate": 2.2994468979119323e-05,
"loss": 4.7529,
"step": 459000
},
{
"epoch": 19.369388357290394,
"grad_norm": 1.3403338193893433,
"learning_rate": 2.290871671740342e-05,
"loss": 4.7495,
"step": 459500
},
{
"epoch": 19.390464949626946,
"grad_norm": 1.3086553812026978,
"learning_rate": 2.282296445568752e-05,
"loss": 4.7502,
"step": 460000
},
{
"epoch": 19.390464949626946,
"eval_accuracy": 0.7487924927211953,
"eval_loss": 4.5153937339782715,
"eval_runtime": 263.4599,
"eval_samples_per_second": 458.157,
"eval_steps_per_second": 4.775,
"step": 460000
},
{
"epoch": 19.411541541963494,
"grad_norm": 1.3441835641860962,
"learning_rate": 2.2737212193971615e-05,
"loss": 4.7481,
"step": 460500
},
{
"epoch": 19.432618134300046,
"grad_norm": 1.2872428894042969,
"learning_rate": 2.2651459932255713e-05,
"loss": 4.7523,
"step": 461000
},
{
"epoch": 19.4536947266366,
"grad_norm": 1.293531060218811,
"learning_rate": 2.256570767053981e-05,
"loss": 4.747,
"step": 461500
},
{
"epoch": 19.474771318973147,
"grad_norm": 1.2957544326782227,
"learning_rate": 2.2479955408823906e-05,
"loss": 4.7468,
"step": 462000
},
{
"epoch": 19.4958479113097,
"grad_norm": 1.282886028289795,
"learning_rate": 2.2394203147108005e-05,
"loss": 4.7495,
"step": 462500
},
{
"epoch": 19.51692450364625,
"grad_norm": 1.3213191032409668,
"learning_rate": 2.2308450885392103e-05,
"loss": 4.7466,
"step": 463000
},
{
"epoch": 19.5380010959828,
"grad_norm": 1.3515186309814453,
"learning_rate": 2.22226986236762e-05,
"loss": 4.7465,
"step": 463500
},
{
"epoch": 19.559077688319352,
"grad_norm": 1.2887663841247559,
"learning_rate": 2.21369463619603e-05,
"loss": 4.7495,
"step": 464000
},
{
"epoch": 19.580154280655904,
"grad_norm": 1.3452165126800537,
"learning_rate": 2.2051194100244394e-05,
"loss": 4.7478,
"step": 464500
},
{
"epoch": 19.601230872992456,
"grad_norm": 1.3580539226531982,
"learning_rate": 2.1965441838528493e-05,
"loss": 4.7492,
"step": 465000
},
{
"epoch": 19.622307465329005,
"grad_norm": 1.2966126203536987,
"learning_rate": 2.187968957681259e-05,
"loss": 4.7481,
"step": 465500
},
{
"epoch": 19.643384057665557,
"grad_norm": 1.2706109285354614,
"learning_rate": 2.179393731509669e-05,
"loss": 4.7507,
"step": 466000
},
{
"epoch": 19.66446065000211,
"grad_norm": 1.3263022899627686,
"learning_rate": 2.1708185053380784e-05,
"loss": 4.7457,
"step": 466500
},
{
"epoch": 19.685537242338658,
"grad_norm": 1.3160582780838013,
"learning_rate": 2.1622432791664883e-05,
"loss": 4.7494,
"step": 467000
},
{
"epoch": 19.70661383467521,
"grad_norm": 1.3117730617523193,
"learning_rate": 2.1536680529948978e-05,
"loss": 4.7461,
"step": 467500
},
{
"epoch": 19.727690427011762,
"grad_norm": 4.170529365539551,
"learning_rate": 2.1450928268233076e-05,
"loss": 4.7485,
"step": 468000
},
{
"epoch": 19.74876701934831,
"grad_norm": 1.308323621749878,
"learning_rate": 2.1365176006517174e-05,
"loss": 4.7448,
"step": 468500
},
{
"epoch": 19.769843611684863,
"grad_norm": 1.3604735136032104,
"learning_rate": 2.127942374480127e-05,
"loss": 4.7471,
"step": 469000
},
{
"epoch": 19.790920204021415,
"grad_norm": 1.2891919612884521,
"learning_rate": 2.1193671483085367e-05,
"loss": 4.7511,
"step": 469500
},
{
"epoch": 19.811996796357963,
"grad_norm": 1.317630648612976,
"learning_rate": 2.1107919221369466e-05,
"loss": 4.7482,
"step": 470000
},
{
"epoch": 19.811996796357963,
"eval_accuracy": 0.7495855960942042,
"eval_loss": 4.510889530181885,
"eval_runtime": 261.0374,
"eval_samples_per_second": 462.409,
"eval_steps_per_second": 4.819,
"step": 470000
},
{
"epoch": 19.833073388694515,
"grad_norm": 1.3223235607147217,
"learning_rate": 2.102216695965356e-05,
"loss": 4.7477,
"step": 470500
},
{
"epoch": 19.854149981031068,
"grad_norm": 1.304041862487793,
"learning_rate": 2.093641469793766e-05,
"loss": 4.7445,
"step": 471000
},
{
"epoch": 19.87522657336762,
"grad_norm": 1.2890863418579102,
"learning_rate": 2.0850662436221757e-05,
"loss": 4.7449,
"step": 471500
},
{
"epoch": 19.89630316570417,
"grad_norm": 1.327549934387207,
"learning_rate": 2.0764910174505852e-05,
"loss": 4.7455,
"step": 472000
},
{
"epoch": 19.91737975804072,
"grad_norm": 1.2899245023727417,
"learning_rate": 2.067915791278995e-05,
"loss": 4.7469,
"step": 472500
},
{
"epoch": 19.938456350377272,
"grad_norm": 1.3785020112991333,
"learning_rate": 2.059340565107405e-05,
"loss": 4.7423,
"step": 473000
},
{
"epoch": 19.95953294271382,
"grad_norm": 1.2997910976409912,
"learning_rate": 2.0507653389358144e-05,
"loss": 4.7444,
"step": 473500
},
{
"epoch": 19.980609535050373,
"grad_norm": 1.342497706413269,
"learning_rate": 2.0421901127642242e-05,
"loss": 4.7426,
"step": 474000
},
{
"epoch": 20.001686127386925,
"grad_norm": 1.3542231321334839,
"learning_rate": 2.033614886592634e-05,
"loss": 4.75,
"step": 474500
},
{
"epoch": 20.022762719723474,
"grad_norm": 1.3310519456863403,
"learning_rate": 2.0250396604210435e-05,
"loss": 4.7432,
"step": 475000
},
{
"epoch": 20.043839312060026,
"grad_norm": 1.375835657119751,
"learning_rate": 2.0164644342494534e-05,
"loss": 4.7422,
"step": 475500
},
{
"epoch": 20.064915904396578,
"grad_norm": 1.3262016773223877,
"learning_rate": 2.007889208077863e-05,
"loss": 4.7357,
"step": 476000
},
{
"epoch": 20.085992496733127,
"grad_norm": 1.3451924324035645,
"learning_rate": 1.9993139819062727e-05,
"loss": 4.7396,
"step": 476500
},
{
"epoch": 20.10706908906968,
"grad_norm": 1.2866400480270386,
"learning_rate": 1.9907387557346825e-05,
"loss": 4.7404,
"step": 477000
},
{
"epoch": 20.12814568140623,
"grad_norm": 1.3332592248916626,
"learning_rate": 1.9821635295630923e-05,
"loss": 4.7409,
"step": 477500
},
{
"epoch": 20.14922227374278,
"grad_norm": 1.3533276319503784,
"learning_rate": 1.9735883033915022e-05,
"loss": 4.7395,
"step": 478000
},
{
"epoch": 20.17029886607933,
"grad_norm": 1.3567092418670654,
"learning_rate": 1.965013077219912e-05,
"loss": 4.7423,
"step": 478500
},
{
"epoch": 20.191375458415884,
"grad_norm": 1.3366515636444092,
"learning_rate": 1.9564378510483215e-05,
"loss": 4.7381,
"step": 479000
},
{
"epoch": 20.212452050752436,
"grad_norm": 1.3182919025421143,
"learning_rate": 1.9478626248767313e-05,
"loss": 4.7433,
"step": 479500
},
{
"epoch": 20.233528643088984,
"grad_norm": 1.3951129913330078,
"learning_rate": 1.939287398705141e-05,
"loss": 4.738,
"step": 480000
},
{
"epoch": 20.233528643088984,
"eval_accuracy": 0.7502136720509862,
"eval_loss": 4.506495475769043,
"eval_runtime": 259.9947,
"eval_samples_per_second": 464.263,
"eval_steps_per_second": 4.839,
"step": 480000
},
{
"epoch": 20.254605235425537,
"grad_norm": 1.2870360612869263,
"learning_rate": 1.9307121725335506e-05,
"loss": 4.7368,
"step": 480500
},
{
"epoch": 20.27568182776209,
"grad_norm": 1.4066439867019653,
"learning_rate": 1.9221369463619605e-05,
"loss": 4.7363,
"step": 481000
},
{
"epoch": 20.296758420098637,
"grad_norm": 1.3170355558395386,
"learning_rate": 1.9135617201903703e-05,
"loss": 4.7362,
"step": 481500
},
{
"epoch": 20.31783501243519,
"grad_norm": 1.511216640472412,
"learning_rate": 1.9049864940187798e-05,
"loss": 4.7368,
"step": 482000
},
{
"epoch": 20.33891160477174,
"grad_norm": 1.3665066957473755,
"learning_rate": 1.8964112678471896e-05,
"loss": 4.7389,
"step": 482500
},
{
"epoch": 20.35998819710829,
"grad_norm": 1.3097718954086304,
"learning_rate": 1.8878360416755995e-05,
"loss": 4.7396,
"step": 483000
},
{
"epoch": 20.381064789444842,
"grad_norm": 1.3352349996566772,
"learning_rate": 1.879260815504009e-05,
"loss": 4.7387,
"step": 483500
},
{
"epoch": 20.402141381781394,
"grad_norm": 1.2864047288894653,
"learning_rate": 1.8706855893324188e-05,
"loss": 4.7335,
"step": 484000
},
{
"epoch": 20.423217974117943,
"grad_norm": 1.3303954601287842,
"learning_rate": 1.8621103631608283e-05,
"loss": 4.7395,
"step": 484500
},
{
"epoch": 20.444294566454495,
"grad_norm": 1.328062891960144,
"learning_rate": 1.853535136989238e-05,
"loss": 4.7357,
"step": 485000
},
{
"epoch": 20.465371158791047,
"grad_norm": 1.2977768182754517,
"learning_rate": 1.844959910817648e-05,
"loss": 4.7346,
"step": 485500
},
{
"epoch": 20.4864477511276,
"grad_norm": 1.3106480836868286,
"learning_rate": 1.8363846846460574e-05,
"loss": 4.7406,
"step": 486000
},
{
"epoch": 20.507524343464148,
"grad_norm": 1.296418309211731,
"learning_rate": 1.8278094584744673e-05,
"loss": 4.7333,
"step": 486500
},
{
"epoch": 20.5286009358007,
"grad_norm": 1.299521803855896,
"learning_rate": 1.819234232302877e-05,
"loss": 4.738,
"step": 487000
},
{
"epoch": 20.549677528137252,
"grad_norm": 1.296390175819397,
"learning_rate": 1.8106590061312866e-05,
"loss": 4.7381,
"step": 487500
},
{
"epoch": 20.5707541204738,
"grad_norm": 1.3181337118148804,
"learning_rate": 1.8020837799596964e-05,
"loss": 4.7384,
"step": 488000
},
{
"epoch": 20.591830712810353,
"grad_norm": 1.339376449584961,
"learning_rate": 1.7935085537881062e-05,
"loss": 4.7382,
"step": 488500
},
{
"epoch": 20.612907305146905,
"grad_norm": 1.3499611616134644,
"learning_rate": 1.7849333276165157e-05,
"loss": 4.7347,
"step": 489000
},
{
"epoch": 20.633983897483454,
"grad_norm": 1.3533358573913574,
"learning_rate": 1.7763581014449256e-05,
"loss": 4.7364,
"step": 489500
},
{
"epoch": 20.655060489820006,
"grad_norm": 1.2989580631256104,
"learning_rate": 1.7677828752733354e-05,
"loss": 4.7328,
"step": 490000
},
{
"epoch": 20.655060489820006,
"eval_accuracy": 0.7509136241367727,
"eval_loss": 4.50206995010376,
"eval_runtime": 261.9729,
"eval_samples_per_second": 460.758,
"eval_steps_per_second": 4.802,
"step": 490000
},
{
"epoch": 20.676137082156558,
"grad_norm": 1.3470207452774048,
"learning_rate": 1.7592076491017452e-05,
"loss": 4.7372,
"step": 490500
},
{
"epoch": 20.697213674493106,
"grad_norm": 1.3582719564437866,
"learning_rate": 1.7506324229301547e-05,
"loss": 4.7386,
"step": 491000
},
{
"epoch": 20.71829026682966,
"grad_norm": 1.3344582319259644,
"learning_rate": 1.7420571967585646e-05,
"loss": 4.7366,
"step": 491500
},
{
"epoch": 20.73936685916621,
"grad_norm": 1.4493197202682495,
"learning_rate": 1.7334819705869744e-05,
"loss": 4.7346,
"step": 492000
},
{
"epoch": 20.760443451502763,
"grad_norm": 1.4000582695007324,
"learning_rate": 1.7249067444153842e-05,
"loss": 4.7386,
"step": 492500
},
{
"epoch": 20.78152004383931,
"grad_norm": 1.2997368574142456,
"learning_rate": 1.7163315182437937e-05,
"loss": 4.7331,
"step": 493000
},
{
"epoch": 20.802596636175863,
"grad_norm": 1.4288058280944824,
"learning_rate": 1.7077562920722035e-05,
"loss": 4.7353,
"step": 493500
},
{
"epoch": 20.823673228512416,
"grad_norm": 1.3395743370056152,
"learning_rate": 1.6991810659006134e-05,
"loss": 4.7368,
"step": 494000
},
{
"epoch": 20.844749820848964,
"grad_norm": 1.2884942293167114,
"learning_rate": 1.690605839729023e-05,
"loss": 4.7351,
"step": 494500
},
{
"epoch": 20.865826413185516,
"grad_norm": 1.2995128631591797,
"learning_rate": 1.6820306135574327e-05,
"loss": 4.7342,
"step": 495000
},
{
"epoch": 20.88690300552207,
"grad_norm": 1.3850295543670654,
"learning_rate": 1.6734553873858425e-05,
"loss": 4.7362,
"step": 495500
},
{
"epoch": 20.907979597858617,
"grad_norm": 1.3758419752120972,
"learning_rate": 1.664880161214252e-05,
"loss": 4.7338,
"step": 496000
},
{
"epoch": 20.92905619019517,
"grad_norm": 1.304371953010559,
"learning_rate": 1.656304935042662e-05,
"loss": 4.7367,
"step": 496500
},
{
"epoch": 20.95013278253172,
"grad_norm": 1.3075520992279053,
"learning_rate": 1.6477297088710717e-05,
"loss": 4.7336,
"step": 497000
},
{
"epoch": 20.97120937486827,
"grad_norm": 1.3707809448242188,
"learning_rate": 1.639154482699481e-05,
"loss": 4.7341,
"step": 497500
},
{
"epoch": 20.992285967204822,
"grad_norm": 1.311104416847229,
"learning_rate": 1.630579256527891e-05,
"loss": 4.7317,
"step": 498000
},
{
"epoch": 21.013362559541374,
"grad_norm": 1.3266741037368774,
"learning_rate": 1.6220040303563008e-05,
"loss": 4.7311,
"step": 498500
},
{
"epoch": 21.034439151877926,
"grad_norm": 1.34255051612854,
"learning_rate": 1.6134288041847103e-05,
"loss": 4.7336,
"step": 499000
},
{
"epoch": 21.055515744214475,
"grad_norm": 1.3618738651275635,
"learning_rate": 1.60485357801312e-05,
"loss": 4.7284,
"step": 499500
},
{
"epoch": 21.076592336551027,
"grad_norm": 1.2942471504211426,
"learning_rate": 1.59627835184153e-05,
"loss": 4.7289,
"step": 500000
},
{
"epoch": 21.076592336551027,
"eval_accuracy": 0.7515025937911175,
"eval_loss": 4.497540473937988,
"eval_runtime": 262.2804,
"eval_samples_per_second": 460.217,
"eval_steps_per_second": 4.796,
"step": 500000
},
{
"epoch": 21.09766892888758,
"grad_norm": 1.3644174337387085,
"learning_rate": 1.5877031256699395e-05,
"loss": 4.7281,
"step": 500500
},
{
"epoch": 21.118745521224128,
"grad_norm": 1.2974344491958618,
"learning_rate": 1.5791278994983493e-05,
"loss": 4.731,
"step": 501000
},
{
"epoch": 21.13982211356068,
"grad_norm": 1.323365330696106,
"learning_rate": 1.570552673326759e-05,
"loss": 4.727,
"step": 501500
},
{
"epoch": 21.16089870589723,
"grad_norm": 1.331466555595398,
"learning_rate": 1.5619774471551686e-05,
"loss": 4.7304,
"step": 502000
},
{
"epoch": 21.18197529823378,
"grad_norm": 1.257196307182312,
"learning_rate": 1.5534022209835785e-05,
"loss": 4.7304,
"step": 502500
},
{
"epoch": 21.203051890570332,
"grad_norm": 1.2858681678771973,
"learning_rate": 1.544826994811988e-05,
"loss": 4.7294,
"step": 503000
},
{
"epoch": 21.224128482906885,
"grad_norm": 1.3078186511993408,
"learning_rate": 1.5362517686403978e-05,
"loss": 4.7296,
"step": 503500
},
{
"epoch": 21.245205075243433,
"grad_norm": 1.3277028799057007,
"learning_rate": 1.5276765424688076e-05,
"loss": 4.731,
"step": 504000
},
{
"epoch": 21.266281667579985,
"grad_norm": 1.3923665285110474,
"learning_rate": 1.5191013162972173e-05,
"loss": 4.7282,
"step": 504500
},
{
"epoch": 21.287358259916537,
"grad_norm": 1.399975299835205,
"learning_rate": 1.5105260901256271e-05,
"loss": 4.7288,
"step": 505000
},
{
"epoch": 21.308434852253086,
"grad_norm": 1.3475841283798218,
"learning_rate": 1.501950863954037e-05,
"loss": 4.7288,
"step": 505500
},
{
"epoch": 21.329511444589638,
"grad_norm": 1.308192491531372,
"learning_rate": 1.4933756377824464e-05,
"loss": 4.7262,
"step": 506000
},
{
"epoch": 21.35058803692619,
"grad_norm": 1.3205522298812866,
"learning_rate": 1.4848004116108563e-05,
"loss": 4.7226,
"step": 506500
},
{
"epoch": 21.371664629262742,
"grad_norm": 1.2941193580627441,
"learning_rate": 1.4762251854392661e-05,
"loss": 4.73,
"step": 507000
},
{
"epoch": 21.39274122159929,
"grad_norm": 1.3256853818893433,
"learning_rate": 1.4676499592676758e-05,
"loss": 4.7289,
"step": 507500
},
{
"epoch": 21.413817813935843,
"grad_norm": 1.318387508392334,
"learning_rate": 1.4590747330960856e-05,
"loss": 4.7276,
"step": 508000
},
{
"epoch": 21.434894406272395,
"grad_norm": 1.2960978746414185,
"learning_rate": 1.4504995069244952e-05,
"loss": 4.7249,
"step": 508500
},
{
"epoch": 21.455970998608944,
"grad_norm": 1.284035086631775,
"learning_rate": 1.4419242807529049e-05,
"loss": 4.7267,
"step": 509000
},
{
"epoch": 21.477047590945496,
"grad_norm": 1.3038010597229004,
"learning_rate": 1.4333490545813147e-05,
"loss": 4.7276,
"step": 509500
},
{
"epoch": 21.498124183282048,
"grad_norm": 1.3180149793624878,
"learning_rate": 1.4247738284097246e-05,
"loss": 4.7235,
"step": 510000
},
{
"epoch": 21.498124183282048,
"eval_accuracy": 0.7520528766686749,
"eval_loss": 4.494962215423584,
"eval_runtime": 260.6091,
"eval_samples_per_second": 463.169,
"eval_steps_per_second": 4.827,
"step": 510000
},
{
"epoch": 21.519200775618597,
"grad_norm": 1.3393025398254395,
"learning_rate": 1.416198602238134e-05,
"loss": 4.7279,
"step": 510500
},
{
"epoch": 21.54027736795515,
"grad_norm": 1.2643400430679321,
"learning_rate": 1.4076233760665439e-05,
"loss": 4.7262,
"step": 511000
},
{
"epoch": 21.5613539602917,
"grad_norm": 1.3056830167770386,
"learning_rate": 1.3990481498949534e-05,
"loss": 4.7223,
"step": 511500
},
{
"epoch": 21.58243055262825,
"grad_norm": 1.335349440574646,
"learning_rate": 1.3904729237233632e-05,
"loss": 4.7279,
"step": 512000
},
{
"epoch": 21.6035071449648,
"grad_norm": 1.303031325340271,
"learning_rate": 1.381897697551773e-05,
"loss": 4.7272,
"step": 512500
},
{
"epoch": 21.624583737301354,
"grad_norm": 1.3515573740005493,
"learning_rate": 1.3733224713801825e-05,
"loss": 4.7288,
"step": 513000
},
{
"epoch": 21.645660329637906,
"grad_norm": 1.324328064918518,
"learning_rate": 1.3647472452085924e-05,
"loss": 4.7297,
"step": 513500
},
{
"epoch": 21.666736921974454,
"grad_norm": 1.3030775785446167,
"learning_rate": 1.3561720190370022e-05,
"loss": 4.726,
"step": 514000
},
{
"epoch": 21.687813514311006,
"grad_norm": 1.3383944034576416,
"learning_rate": 1.3475967928654119e-05,
"loss": 4.7244,
"step": 514500
},
{
"epoch": 21.70889010664756,
"grad_norm": 1.325595498085022,
"learning_rate": 1.3390215666938217e-05,
"loss": 4.7266,
"step": 515000
},
{
"epoch": 21.729966698984107,
"grad_norm": 1.2994537353515625,
"learning_rate": 1.3304463405222315e-05,
"loss": 4.7273,
"step": 515500
},
{
"epoch": 21.75104329132066,
"grad_norm": 1.4248188734054565,
"learning_rate": 1.321871114350641e-05,
"loss": 4.7288,
"step": 516000
},
{
"epoch": 21.77211988365721,
"grad_norm": 1.381913185119629,
"learning_rate": 1.3132958881790508e-05,
"loss": 4.7225,
"step": 516500
},
{
"epoch": 21.79319647599376,
"grad_norm": 1.3230239152908325,
"learning_rate": 1.3047206620074607e-05,
"loss": 4.7218,
"step": 517000
},
{
"epoch": 21.814273068330312,
"grad_norm": 1.3599411249160767,
"learning_rate": 1.2961454358358702e-05,
"loss": 4.7247,
"step": 517500
},
{
"epoch": 21.835349660666864,
"grad_norm": 1.3636369705200195,
"learning_rate": 1.28757020966428e-05,
"loss": 4.7218,
"step": 518000
},
{
"epoch": 21.856426253003413,
"grad_norm": 1.3184322118759155,
"learning_rate": 1.2789949834926898e-05,
"loss": 4.7221,
"step": 518500
},
{
"epoch": 21.877502845339965,
"grad_norm": 1.3112659454345703,
"learning_rate": 1.2704197573210993e-05,
"loss": 4.7273,
"step": 519000
},
{
"epoch": 21.898579437676517,
"grad_norm": 1.305115818977356,
"learning_rate": 1.2618445311495092e-05,
"loss": 4.7216,
"step": 519500
},
{
"epoch": 21.91965603001307,
"grad_norm": 1.3562394380569458,
"learning_rate": 1.2532693049779186e-05,
"loss": 4.7191,
"step": 520000
},
{
"epoch": 21.91965603001307,
"eval_accuracy": 0.7524910270687756,
"eval_loss": 4.492299556732178,
"eval_runtime": 261.6865,
"eval_samples_per_second": 461.262,
"eval_steps_per_second": 4.807,
"step": 520000
},
{
"epoch": 21.940732622349618,
"grad_norm": 1.3857940435409546,
"learning_rate": 1.2446940788063285e-05,
"loss": 4.7234,
"step": 520500
},
{
"epoch": 21.96180921468617,
"grad_norm": 1.3443962335586548,
"learning_rate": 1.2361188526347383e-05,
"loss": 4.7237,
"step": 521000
},
{
"epoch": 21.982885807022722,
"grad_norm": 1.3494166135787964,
"learning_rate": 1.227543626463148e-05,
"loss": 4.7242,
"step": 521500
},
{
"epoch": 22.00396239935927,
"grad_norm": 1.35657799243927,
"learning_rate": 1.2189684002915578e-05,
"loss": 4.72,
"step": 522000
},
{
"epoch": 22.025038991695823,
"grad_norm": 1.3643733263015747,
"learning_rate": 1.2103931741199675e-05,
"loss": 4.7188,
"step": 522500
},
{
"epoch": 22.046115584032375,
"grad_norm": 1.3536295890808105,
"learning_rate": 1.2018179479483773e-05,
"loss": 4.7206,
"step": 523000
},
{
"epoch": 22.067192176368923,
"grad_norm": 1.3302286863327026,
"learning_rate": 1.193242721776787e-05,
"loss": 4.721,
"step": 523500
},
{
"epoch": 22.088268768705476,
"grad_norm": 1.3674339056015015,
"learning_rate": 1.1846674956051966e-05,
"loss": 4.7229,
"step": 524000
},
{
"epoch": 22.109345361042028,
"grad_norm": 1.3218138217926025,
"learning_rate": 1.1760922694336064e-05,
"loss": 4.7178,
"step": 524500
},
{
"epoch": 22.130421953378576,
"grad_norm": 1.3473918437957764,
"learning_rate": 1.1675170432620161e-05,
"loss": 4.7233,
"step": 525000
},
{
"epoch": 22.15149854571513,
"grad_norm": 1.2830966711044312,
"learning_rate": 1.1589418170904258e-05,
"loss": 4.7173,
"step": 525500
},
{
"epoch": 22.17257513805168,
"grad_norm": 1.3221532106399536,
"learning_rate": 1.1503665909188354e-05,
"loss": 4.7176,
"step": 526000
},
{
"epoch": 22.193651730388233,
"grad_norm": 1.3866322040557861,
"learning_rate": 1.1417913647472453e-05,
"loss": 4.7187,
"step": 526500
},
{
"epoch": 22.21472832272478,
"grad_norm": 1.3473929166793823,
"learning_rate": 1.133216138575655e-05,
"loss": 4.7189,
"step": 527000
},
{
"epoch": 22.235804915061333,
"grad_norm": 1.3498623371124268,
"learning_rate": 1.1246409124040646e-05,
"loss": 4.7177,
"step": 527500
},
{
"epoch": 22.256881507397885,
"grad_norm": 1.3142567873001099,
"learning_rate": 1.1160656862324744e-05,
"loss": 4.72,
"step": 528000
},
{
"epoch": 22.277958099734434,
"grad_norm": 1.3304718732833862,
"learning_rate": 1.1074904600608842e-05,
"loss": 4.7197,
"step": 528500
},
{
"epoch": 22.299034692070986,
"grad_norm": 1.3467963933944702,
"learning_rate": 1.0989152338892939e-05,
"loss": 4.7153,
"step": 529000
},
{
"epoch": 22.320111284407538,
"grad_norm": 1.4053053855895996,
"learning_rate": 1.0903400077177037e-05,
"loss": 4.7201,
"step": 529500
},
{
"epoch": 22.341187876744087,
"grad_norm": 1.309505820274353,
"learning_rate": 1.0817647815461134e-05,
"loss": 4.7191,
"step": 530000
},
{
"epoch": 22.341187876744087,
"eval_accuracy": 0.7529558536830261,
"eval_loss": 4.489067077636719,
"eval_runtime": 260.9275,
"eval_samples_per_second": 462.604,
"eval_steps_per_second": 4.821,
"step": 530000
},
{
"epoch": 22.36226446908064,
"grad_norm": 1.3429690599441528,
"learning_rate": 1.073189555374523e-05,
"loss": 4.721,
"step": 530500
},
{
"epoch": 22.38334106141719,
"grad_norm": 1.362031102180481,
"learning_rate": 1.0646143292029327e-05,
"loss": 4.7228,
"step": 531000
},
{
"epoch": 22.40441765375374,
"grad_norm": 6.282242774963379,
"learning_rate": 1.0560391030313425e-05,
"loss": 4.7145,
"step": 531500
},
{
"epoch": 22.42549424609029,
"grad_norm": 1.329899549484253,
"learning_rate": 1.0474638768597522e-05,
"loss": 4.7172,
"step": 532000
},
{
"epoch": 22.446570838426844,
"grad_norm": 1.326348900794983,
"learning_rate": 1.0388886506881619e-05,
"loss": 4.7188,
"step": 532500
},
{
"epoch": 22.467647430763392,
"grad_norm": 1.3656094074249268,
"learning_rate": 1.0303134245165717e-05,
"loss": 4.7214,
"step": 533000
},
{
"epoch": 22.488724023099945,
"grad_norm": 1.3846843242645264,
"learning_rate": 1.0217381983449814e-05,
"loss": 4.7169,
"step": 533500
},
{
"epoch": 22.509800615436497,
"grad_norm": 1.3037809133529663,
"learning_rate": 1.013162972173391e-05,
"loss": 4.7148,
"step": 534000
},
{
"epoch": 22.53087720777305,
"grad_norm": 1.3882744312286377,
"learning_rate": 1.0045877460018009e-05,
"loss": 4.7198,
"step": 534500
},
{
"epoch": 22.551953800109597,
"grad_norm": 1.3189753293991089,
"learning_rate": 9.960125198302105e-06,
"loss": 4.72,
"step": 535000
},
{
"epoch": 22.57303039244615,
"grad_norm": 2.0432140827178955,
"learning_rate": 9.874372936586203e-06,
"loss": 4.7173,
"step": 535500
},
{
"epoch": 22.5941069847827,
"grad_norm": 1.3240022659301758,
"learning_rate": 9.7886206748703e-06,
"loss": 4.7162,
"step": 536000
},
{
"epoch": 22.61518357711925,
"grad_norm": 1.3836686611175537,
"learning_rate": 9.702868413154398e-06,
"loss": 4.7164,
"step": 536500
},
{
"epoch": 22.636260169455802,
"grad_norm": 1.3244681358337402,
"learning_rate": 9.617116151438495e-06,
"loss": 4.715,
"step": 537000
},
{
"epoch": 22.657336761792354,
"grad_norm": 1.3440351486206055,
"learning_rate": 9.531363889722592e-06,
"loss": 4.716,
"step": 537500
},
{
"epoch": 22.678413354128903,
"grad_norm": 1.2966499328613281,
"learning_rate": 9.44561162800669e-06,
"loss": 4.7141,
"step": 538000
},
{
"epoch": 22.699489946465455,
"grad_norm": 1.3266011476516724,
"learning_rate": 9.359859366290787e-06,
"loss": 4.7164,
"step": 538500
},
{
"epoch": 22.720566538802007,
"grad_norm": 1.3026056289672852,
"learning_rate": 9.274107104574883e-06,
"loss": 4.7172,
"step": 539000
},
{
"epoch": 22.741643131138556,
"grad_norm": 1.315037727355957,
"learning_rate": 9.18835484285898e-06,
"loss": 4.7129,
"step": 539500
},
{
"epoch": 22.762719723475108,
"grad_norm": 1.3686784505844116,
"learning_rate": 9.102602581143078e-06,
"loss": 4.714,
"step": 540000
},
{
"epoch": 22.762719723475108,
"eval_accuracy": 0.7533862121787827,
"eval_loss": 4.487707614898682,
"eval_runtime": 260.8633,
"eval_samples_per_second": 462.718,
"eval_steps_per_second": 4.822,
"step": 540000
},
{
"epoch": 22.78379631581166,
"grad_norm": 1.3466520309448242,
"learning_rate": 9.016850319427175e-06,
"loss": 4.7183,
"step": 540500
},
{
"epoch": 22.804872908148212,
"grad_norm": 1.3874651193618774,
"learning_rate": 8.931098057711271e-06,
"loss": 4.7155,
"step": 541000
},
{
"epoch": 22.82594950048476,
"grad_norm": 1.3618019819259644,
"learning_rate": 8.84534579599537e-06,
"loss": 4.7174,
"step": 541500
},
{
"epoch": 22.847026092821313,
"grad_norm": 1.3819440603256226,
"learning_rate": 8.759593534279468e-06,
"loss": 4.7146,
"step": 542000
},
{
"epoch": 22.868102685157865,
"grad_norm": 1.3088500499725342,
"learning_rate": 8.673841272563565e-06,
"loss": 4.7133,
"step": 542500
},
{
"epoch": 22.889179277494414,
"grad_norm": 1.35321044921875,
"learning_rate": 8.588089010847663e-06,
"loss": 4.7168,
"step": 543000
},
{
"epoch": 22.910255869830966,
"grad_norm": 1.9699029922485352,
"learning_rate": 8.50233674913176e-06,
"loss": 4.7153,
"step": 543500
},
{
"epoch": 22.931332462167518,
"grad_norm": 1.315864086151123,
"learning_rate": 8.416584487415856e-06,
"loss": 4.7114,
"step": 544000
},
{
"epoch": 22.952409054504066,
"grad_norm": 1.3785382509231567,
"learning_rate": 8.330832225699953e-06,
"loss": 4.7093,
"step": 544500
},
{
"epoch": 22.97348564684062,
"grad_norm": 1.3577860593795776,
"learning_rate": 8.245079963984051e-06,
"loss": 4.7105,
"step": 545000
},
{
"epoch": 22.99456223917717,
"grad_norm": 1.3272500038146973,
"learning_rate": 8.159327702268148e-06,
"loss": 4.7165,
"step": 545500
},
{
"epoch": 23.01563883151372,
"grad_norm": 1.334040641784668,
"learning_rate": 8.073575440552244e-06,
"loss": 4.7133,
"step": 546000
},
{
"epoch": 23.03671542385027,
"grad_norm": 1.3966127634048462,
"learning_rate": 7.987823178836343e-06,
"loss": 4.7115,
"step": 546500
},
{
"epoch": 23.057792016186824,
"grad_norm": 1.3549610376358032,
"learning_rate": 7.90207091712044e-06,
"loss": 4.7107,
"step": 547000
},
{
"epoch": 23.078868608523376,
"grad_norm": 1.3909356594085693,
"learning_rate": 7.816318655404536e-06,
"loss": 4.7109,
"step": 547500
},
{
"epoch": 23.099945200859924,
"grad_norm": 1.3775635957717896,
"learning_rate": 7.730566393688634e-06,
"loss": 4.7122,
"step": 548000
},
{
"epoch": 23.121021793196476,
"grad_norm": 1.3257262706756592,
"learning_rate": 7.64481413197273e-06,
"loss": 4.7119,
"step": 548500
},
{
"epoch": 23.14209838553303,
"grad_norm": 1.3208304643630981,
"learning_rate": 7.559061870256828e-06,
"loss": 4.7128,
"step": 549000
},
{
"epoch": 23.163174977869577,
"grad_norm": 1.3450058698654175,
"learning_rate": 7.473309608540925e-06,
"loss": 4.7119,
"step": 549500
},
{
"epoch": 23.18425157020613,
"grad_norm": 1.3811434507369995,
"learning_rate": 7.387557346825023e-06,
"loss": 4.714,
"step": 550000
},
{
"epoch": 23.18425157020613,
"eval_accuracy": 0.7539060937667986,
"eval_loss": 4.483209609985352,
"eval_runtime": 261.225,
"eval_samples_per_second": 462.077,
"eval_steps_per_second": 4.816,
"step": 550000
},
{
"epoch": 23.20532816254268,
"grad_norm": 1.371256947517395,
"learning_rate": 7.3018050851091205e-06,
"loss": 4.7089,
"step": 550500
},
{
"epoch": 23.22640475487923,
"grad_norm": 1.3358173370361328,
"learning_rate": 7.216052823393217e-06,
"loss": 4.7114,
"step": 551000
},
{
"epoch": 23.247481347215782,
"grad_norm": 1.3546581268310547,
"learning_rate": 7.1303005616773155e-06,
"loss": 4.7109,
"step": 551500
},
{
"epoch": 23.268557939552334,
"grad_norm": 1.380530834197998,
"learning_rate": 7.044548299961412e-06,
"loss": 4.7156,
"step": 552000
},
{
"epoch": 23.289634531888883,
"grad_norm": 1.3954757452011108,
"learning_rate": 6.958796038245509e-06,
"loss": 4.706,
"step": 552500
},
{
"epoch": 23.310711124225435,
"grad_norm": 1.4315953254699707,
"learning_rate": 6.873043776529606e-06,
"loss": 4.7149,
"step": 553000
},
{
"epoch": 23.331787716561987,
"grad_norm": 1.365644931793213,
"learning_rate": 6.787291514813704e-06,
"loss": 4.7086,
"step": 553500
},
{
"epoch": 23.35286430889854,
"grad_norm": 1.366931438446045,
"learning_rate": 6.701539253097801e-06,
"loss": 4.7096,
"step": 554000
},
{
"epoch": 23.373940901235088,
"grad_norm": 1.4178738594055176,
"learning_rate": 6.615786991381898e-06,
"loss": 4.7103,
"step": 554500
},
{
"epoch": 23.39501749357164,
"grad_norm": 1.3668785095214844,
"learning_rate": 6.530034729665996e-06,
"loss": 4.7105,
"step": 555000
},
{
"epoch": 23.416094085908192,
"grad_norm": 1.3625216484069824,
"learning_rate": 6.444282467950093e-06,
"loss": 4.7094,
"step": 555500
},
{
"epoch": 23.43717067824474,
"grad_norm": 1.3226115703582764,
"learning_rate": 6.358530206234189e-06,
"loss": 4.7085,
"step": 556000
},
{
"epoch": 23.458247270581293,
"grad_norm": 1.3469371795654297,
"learning_rate": 6.272777944518287e-06,
"loss": 4.7105,
"step": 556500
},
{
"epoch": 23.479323862917845,
"grad_norm": 1.3316009044647217,
"learning_rate": 6.187025682802384e-06,
"loss": 4.7113,
"step": 557000
},
{
"epoch": 23.500400455254393,
"grad_norm": 1.3537923097610474,
"learning_rate": 6.101273421086482e-06,
"loss": 4.7083,
"step": 557500
},
{
"epoch": 23.521477047590945,
"grad_norm": 1.3457926511764526,
"learning_rate": 6.015521159370579e-06,
"loss": 4.7094,
"step": 558000
},
{
"epoch": 23.542553639927497,
"grad_norm": 1.3575489521026611,
"learning_rate": 5.929768897654676e-06,
"loss": 4.7084,
"step": 558500
},
{
"epoch": 23.563630232264046,
"grad_norm": 1.3390086889266968,
"learning_rate": 5.844016635938773e-06,
"loss": 4.7104,
"step": 559000
},
{
"epoch": 23.584706824600598,
"grad_norm": 1.3351737260818481,
"learning_rate": 5.75826437422287e-06,
"loss": 4.7077,
"step": 559500
},
{
"epoch": 23.60578341693715,
"grad_norm": 1.3952457904815674,
"learning_rate": 5.672512112506967e-06,
"loss": 4.7086,
"step": 560000
},
{
"epoch": 23.60578341693715,
"eval_accuracy": 0.7543988005542708,
"eval_loss": 4.48004150390625,
"eval_runtime": 260.8675,
"eval_samples_per_second": 462.71,
"eval_steps_per_second": 4.822,
"step": 560000
},
{
"epoch": 23.6268600092737,
"grad_norm": 1.33452308177948,
"learning_rate": 5.5867598507910655e-06,
"loss": 4.708,
"step": 560500
},
{
"epoch": 23.64793660161025,
"grad_norm": 1.3753418922424316,
"learning_rate": 5.501007589075162e-06,
"loss": 4.7088,
"step": 561000
},
{
"epoch": 23.669013193946803,
"grad_norm": 1.3825522661209106,
"learning_rate": 5.41525532735926e-06,
"loss": 4.7093,
"step": 561500
},
{
"epoch": 23.690089786283355,
"grad_norm": 1.4510549306869507,
"learning_rate": 5.329503065643356e-06,
"loss": 4.7032,
"step": 562000
},
{
"epoch": 23.711166378619904,
"grad_norm": 1.3581342697143555,
"learning_rate": 5.243750803927454e-06,
"loss": 4.7056,
"step": 562500
},
{
"epoch": 23.732242970956456,
"grad_norm": 1.3992446660995483,
"learning_rate": 5.157998542211551e-06,
"loss": 4.7094,
"step": 563000
},
{
"epoch": 23.753319563293008,
"grad_norm": 1.3415894508361816,
"learning_rate": 5.072246280495649e-06,
"loss": 4.705,
"step": 563500
},
{
"epoch": 23.774396155629557,
"grad_norm": 1.3453013896942139,
"learning_rate": 4.986494018779746e-06,
"loss": 4.7079,
"step": 564000
},
{
"epoch": 23.79547274796611,
"grad_norm": 1.3747302293777466,
"learning_rate": 4.900741757063843e-06,
"loss": 4.7072,
"step": 564500
},
{
"epoch": 23.81654934030266,
"grad_norm": 1.3544883728027344,
"learning_rate": 4.81498949534794e-06,
"loss": 4.7045,
"step": 565000
},
{
"epoch": 23.83762593263921,
"grad_norm": 1.3366096019744873,
"learning_rate": 4.729237233632038e-06,
"loss": 4.7046,
"step": 565500
},
{
"epoch": 23.85870252497576,
"grad_norm": 1.3346614837646484,
"learning_rate": 4.643484971916134e-06,
"loss": 4.7082,
"step": 566000
},
{
"epoch": 23.879779117312314,
"grad_norm": 1.3355252742767334,
"learning_rate": 4.557732710200232e-06,
"loss": 4.7054,
"step": 566500
},
{
"epoch": 23.900855709648862,
"grad_norm": 1.352321982383728,
"learning_rate": 4.471980448484329e-06,
"loss": 4.7049,
"step": 567000
},
{
"epoch": 23.921932301985414,
"grad_norm": 1.4056611061096191,
"learning_rate": 4.386228186768427e-06,
"loss": 4.7061,
"step": 567500
},
{
"epoch": 23.943008894321967,
"grad_norm": 1.3331092596054077,
"learning_rate": 4.300475925052524e-06,
"loss": 4.7055,
"step": 568000
},
{
"epoch": 23.96408548665852,
"grad_norm": 1.3626446723937988,
"learning_rate": 4.214723663336621e-06,
"loss": 4.7105,
"step": 568500
},
{
"epoch": 23.985162078995067,
"grad_norm": 1.3941088914871216,
"learning_rate": 4.128971401620718e-06,
"loss": 4.7047,
"step": 569000
},
{
"epoch": 24.00623867133162,
"grad_norm": 1.362053394317627,
"learning_rate": 4.043219139904815e-06,
"loss": 4.7059,
"step": 569500
},
{
"epoch": 24.02731526366817,
"grad_norm": 1.3403210639953613,
"learning_rate": 3.957466878188912e-06,
"loss": 4.7055,
"step": 570000
},
{
"epoch": 24.02731526366817,
"eval_accuracy": 0.7546587322670965,
"eval_loss": 4.478504657745361,
"eval_runtime": 262.6667,
"eval_samples_per_second": 459.541,
"eval_steps_per_second": 4.789,
"step": 570000
},
{
"epoch": 24.04839185600472,
"grad_norm": 1.3970316648483276,
"learning_rate": 3.87171461647301e-06,
"loss": 4.7067,
"step": 570500
},
{
"epoch": 24.069468448341272,
"grad_norm": 1.318151831626892,
"learning_rate": 3.7859623547571067e-06,
"loss": 4.7026,
"step": 571000
},
{
"epoch": 24.090545040677824,
"grad_norm": 1.3458597660064697,
"learning_rate": 3.7002100930412046e-06,
"loss": 4.7021,
"step": 571500
},
{
"epoch": 24.111621633014373,
"grad_norm": 1.3329774141311646,
"learning_rate": 3.614457831325301e-06,
"loss": 4.7086,
"step": 572000
},
{
"epoch": 24.132698225350925,
"grad_norm": 1.3571734428405762,
"learning_rate": 3.5287055696093987e-06,
"loss": 4.7036,
"step": 572500
},
{
"epoch": 24.153774817687477,
"grad_norm": 1.3504315614700317,
"learning_rate": 3.4429533078934957e-06,
"loss": 4.7048,
"step": 573000
},
{
"epoch": 24.174851410024026,
"grad_norm": 1.3554165363311768,
"learning_rate": 3.357201046177593e-06,
"loss": 4.7084,
"step": 573500
},
{
"epoch": 24.195928002360578,
"grad_norm": 1.324753761291504,
"learning_rate": 3.2714487844616906e-06,
"loss": 4.7075,
"step": 574000
},
{
"epoch": 24.21700459469713,
"grad_norm": 1.3569416999816895,
"learning_rate": 3.1856965227457872e-06,
"loss": 4.7065,
"step": 574500
},
{
"epoch": 24.238081187033682,
"grad_norm": 1.3505769968032837,
"learning_rate": 3.099944261029885e-06,
"loss": 4.709,
"step": 575000
},
{
"epoch": 24.25915777937023,
"grad_norm": 1.3743845224380493,
"learning_rate": 3.014191999313982e-06,
"loss": 4.7038,
"step": 575500
},
{
"epoch": 24.280234371706783,
"grad_norm": 1.3651432991027832,
"learning_rate": 2.928439737598079e-06,
"loss": 4.7014,
"step": 576000
},
{
"epoch": 24.301310964043335,
"grad_norm": 1.3437137603759766,
"learning_rate": 2.8426874758821767e-06,
"loss": 4.7048,
"step": 576500
},
{
"epoch": 24.322387556379883,
"grad_norm": 1.3715219497680664,
"learning_rate": 2.7569352141662737e-06,
"loss": 4.6991,
"step": 577000
},
{
"epoch": 24.343464148716436,
"grad_norm": 1.3638877868652344,
"learning_rate": 2.6711829524503707e-06,
"loss": 4.7007,
"step": 577500
},
{
"epoch": 24.364540741052988,
"grad_norm": 1.3888444900512695,
"learning_rate": 2.585430690734468e-06,
"loss": 4.7016,
"step": 578000
},
{
"epoch": 24.385617333389536,
"grad_norm": 1.3717230558395386,
"learning_rate": 2.4996784290185657e-06,
"loss": 4.7025,
"step": 578500
},
{
"epoch": 24.40669392572609,
"grad_norm": 1.342763066291809,
"learning_rate": 2.4139261673026627e-06,
"loss": 4.6984,
"step": 579000
},
{
"epoch": 24.42777051806264,
"grad_norm": 1.3474459648132324,
"learning_rate": 2.3281739055867597e-06,
"loss": 4.7027,
"step": 579500
},
{
"epoch": 24.44884711039919,
"grad_norm": 1.3378591537475586,
"learning_rate": 2.242421643870857e-06,
"loss": 4.7024,
"step": 580000
},
{
"epoch": 24.44884711039919,
"eval_accuracy": 0.7551914114581149,
"eval_loss": 4.474733829498291,
"eval_runtime": 260.9817,
"eval_samples_per_second": 462.508,
"eval_steps_per_second": 4.82,
"step": 580000
},
{
"epoch": 24.46992370273574,
"grad_norm": 1.3737248182296753,
"learning_rate": 2.1566693821549547e-06,
"loss": 4.7026,
"step": 580500
},
{
"epoch": 24.491000295072293,
"grad_norm": 1.3439487218856812,
"learning_rate": 2.0709171204390517e-06,
"loss": 4.7058,
"step": 581000
},
{
"epoch": 24.512076887408845,
"grad_norm": 1.3368207216262817,
"learning_rate": 1.985164858723149e-06,
"loss": 4.7,
"step": 581500
},
{
"epoch": 24.533153479745394,
"grad_norm": 1.3779650926589966,
"learning_rate": 1.8994125970072462e-06,
"loss": 4.706,
"step": 582000
},
{
"epoch": 24.554230072081946,
"grad_norm": 1.367583990097046,
"learning_rate": 1.8136603352913432e-06,
"loss": 4.7004,
"step": 582500
},
{
"epoch": 24.5753066644185,
"grad_norm": 1.343848705291748,
"learning_rate": 1.7279080735754405e-06,
"loss": 4.7052,
"step": 583000
},
{
"epoch": 24.596383256755047,
"grad_norm": 1.3909797668457031,
"learning_rate": 1.642155811859538e-06,
"loss": 4.7042,
"step": 583500
},
{
"epoch": 24.6174598490916,
"grad_norm": 1.3735647201538086,
"learning_rate": 1.5564035501436352e-06,
"loss": 4.7026,
"step": 584000
},
{
"epoch": 24.63853644142815,
"grad_norm": 1.3765846490859985,
"learning_rate": 1.4706512884277324e-06,
"loss": 4.7021,
"step": 584500
},
{
"epoch": 24.6596130337647,
"grad_norm": 1.3304394483566284,
"learning_rate": 1.3848990267118295e-06,
"loss": 4.6991,
"step": 585000
},
{
"epoch": 24.680689626101252,
"grad_norm": 1.3204551935195923,
"learning_rate": 1.299146764995927e-06,
"loss": 4.7032,
"step": 585500
},
{
"epoch": 24.701766218437804,
"grad_norm": 1.3790500164031982,
"learning_rate": 1.213394503280024e-06,
"loss": 4.7008,
"step": 586000
},
{
"epoch": 24.722842810774353,
"grad_norm": 1.3450005054473877,
"learning_rate": 1.1276422415641212e-06,
"loss": 4.7009,
"step": 586500
},
{
"epoch": 24.743919403110905,
"grad_norm": 1.346596121788025,
"learning_rate": 1.0418899798482187e-06,
"loss": 4.703,
"step": 587000
},
{
"epoch": 24.764995995447457,
"grad_norm": 1.371549367904663,
"learning_rate": 9.561377181323157e-07,
"loss": 4.7001,
"step": 587500
},
{
"epoch": 24.786072587784005,
"grad_norm": 1.3492426872253418,
"learning_rate": 8.70385456416413e-07,
"loss": 4.7005,
"step": 588000
},
{
"epoch": 24.807149180120557,
"grad_norm": 1.3447999954223633,
"learning_rate": 7.846331947005103e-07,
"loss": 4.7039,
"step": 588500
},
{
"epoch": 24.82822577245711,
"grad_norm": 1.4224281311035156,
"learning_rate": 6.988809329846075e-07,
"loss": 4.6972,
"step": 589000
},
{
"epoch": 24.84930236479366,
"grad_norm": 1.3961892127990723,
"learning_rate": 6.131286712687047e-07,
"loss": 4.7011,
"step": 589500
},
{
"epoch": 24.87037895713021,
"grad_norm": 1.3752344846725464,
"learning_rate": 5.27376409552802e-07,
"loss": 4.6993,
"step": 590000
},
{
"epoch": 24.87037895713021,
"eval_accuracy": 0.7550050066880156,
"eval_loss": 4.4753289222717285,
"eval_runtime": 260.8175,
"eval_samples_per_second": 462.799,
"eval_steps_per_second": 4.823,
"step": 590000
},
{
"epoch": 24.891455549466762,
"grad_norm": 1.3609323501586914,
"learning_rate": 4.4162414783689917e-07,
"loss": 4.7003,
"step": 590500
},
{
"epoch": 24.912532141803315,
"grad_norm": 1.3006179332733154,
"learning_rate": 3.558718861209964e-07,
"loss": 4.6995,
"step": 591000
},
{
"epoch": 24.933608734139863,
"grad_norm": 1.3899483680725098,
"learning_rate": 2.701196244050937e-07,
"loss": 4.7,
"step": 591500
},
{
"epoch": 24.954685326476415,
"grad_norm": 1.3750133514404297,
"learning_rate": 1.8436736268919094e-07,
"loss": 4.6996,
"step": 592000
},
{
"epoch": 24.975761918812967,
"grad_norm": 1.3374977111816406,
"learning_rate": 9.861510097328816e-08,
"loss": 4.7004,
"step": 592500
},
{
"epoch": 24.996838511149516,
"grad_norm": 1.3854748010635376,
"learning_rate": 1.2862839257385414e-08,
"loss": 4.7005,
"step": 593000
},
{
"epoch": 25.0,
"step": 593075,
"total_flos": 1.5101928630878515e+19,
"train_loss": 5.231086326933267,
"train_runtime": 273328.675,
"train_samples_per_second": 208.297,
"train_steps_per_second": 2.17
}
],
"logging_steps": 500,
"max_steps": 593075,
"num_input_tokens_seen": 0,
"num_train_epochs": 25,
"save_steps": 10000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.5101928630878515e+19,
"train_batch_size": 96,
"trial_name": null,
"trial_params": null
}