{ "best_metric": 0.77775, "best_model_checkpoint": "swin-tiny-patch4-window7-224-finetuned-leukemia.v2.2/checkpoint-4687", "epoch": 29.951999999999998, "eval_steps": 500, "global_step": 9360, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.032, "grad_norm": 301599.375, "learning_rate": 1.0683760683760684e-05, "loss": 0.2217, "step": 10 }, { "epoch": 0.064, "grad_norm": 364430.96875, "learning_rate": 2.1367521367521368e-05, "loss": 0.2072, "step": 20 }, { "epoch": 0.096, "grad_norm": 279609.3125, "learning_rate": 3.205128205128205e-05, "loss": 0.215, "step": 30 }, { "epoch": 0.128, "grad_norm": 206238.953125, "learning_rate": 4.2735042735042735e-05, "loss": 0.1963, "step": 40 }, { "epoch": 0.16, "grad_norm": 380894.78125, "learning_rate": 5.341880341880342e-05, "loss": 0.2141, "step": 50 }, { "epoch": 0.192, "grad_norm": 397403.78125, "learning_rate": 6.41025641025641e-05, "loss": 0.2216, "step": 60 }, { "epoch": 0.224, "grad_norm": 208578.28125, "learning_rate": 7.478632478632479e-05, "loss": 0.1989, "step": 70 }, { "epoch": 0.256, "grad_norm": 252869.15625, "learning_rate": 8.547008547008547e-05, "loss": 0.2031, "step": 80 }, { "epoch": 0.288, "grad_norm": 229751.796875, "learning_rate": 9.615384615384617e-05, "loss": 0.1923, "step": 90 }, { "epoch": 0.32, "grad_norm": 236082.515625, "learning_rate": 0.00010683760683760684, "loss": 0.1989, "step": 100 }, { "epoch": 0.352, "grad_norm": 146456.484375, "learning_rate": 0.00011752136752136752, "loss": 0.2219, "step": 110 }, { "epoch": 0.384, "grad_norm": 298750.875, "learning_rate": 0.0001282051282051282, "loss": 0.2128, "step": 120 }, { "epoch": 0.416, "grad_norm": 274198.6875, "learning_rate": 0.0001388888888888889, "loss": 0.2079, "step": 130 }, { "epoch": 0.448, "grad_norm": 337207.125, "learning_rate": 0.00014957264957264957, "loss": 0.2041, "step": 140 }, { "epoch": 0.48, "grad_norm": 292073.5625, "learning_rate": 0.00016025641025641028, 
"loss": 0.2339, "step": 150 }, { "epoch": 0.512, "grad_norm": 227215.015625, "learning_rate": 0.00017094017094017094, "loss": 0.2227, "step": 160 }, { "epoch": 0.544, "grad_norm": 314843.59375, "learning_rate": 0.00018162393162393162, "loss": 0.2069, "step": 170 }, { "epoch": 0.576, "grad_norm": 412868.6875, "learning_rate": 0.00019230769230769233, "loss": 0.2338, "step": 180 }, { "epoch": 0.608, "grad_norm": 200428.625, "learning_rate": 0.000202991452991453, "loss": 0.2106, "step": 190 }, { "epoch": 0.64, "grad_norm": 215299.359375, "learning_rate": 0.00021367521367521368, "loss": 0.2043, "step": 200 }, { "epoch": 0.672, "grad_norm": 457425.53125, "learning_rate": 0.00022435897435897436, "loss": 0.2194, "step": 210 }, { "epoch": 0.704, "grad_norm": 263560.4375, "learning_rate": 0.00023504273504273504, "loss": 0.2015, "step": 220 }, { "epoch": 0.736, "grad_norm": 348618.3125, "learning_rate": 0.0002457264957264957, "loss": 0.2029, "step": 230 }, { "epoch": 0.768, "grad_norm": 275649.71875, "learning_rate": 0.0002564102564102564, "loss": 0.2159, "step": 240 }, { "epoch": 0.8, "grad_norm": 204672.875, "learning_rate": 0.0002670940170940171, "loss": 0.2032, "step": 250 }, { "epoch": 0.832, "grad_norm": 173324.40625, "learning_rate": 0.0002777777777777778, "loss": 0.1903, "step": 260 }, { "epoch": 0.864, "grad_norm": 282196.0, "learning_rate": 0.00028846153846153843, "loss": 0.2278, "step": 270 }, { "epoch": 0.896, "grad_norm": 327001.0, "learning_rate": 0.00029914529914529915, "loss": 0.2043, "step": 280 }, { "epoch": 0.928, "grad_norm": 218572.71875, "learning_rate": 0.00030982905982905986, "loss": 0.205, "step": 290 }, { "epoch": 0.96, "grad_norm": 234121.171875, "learning_rate": 0.00032051282051282057, "loss": 0.2175, "step": 300 }, { "epoch": 0.992, "grad_norm": 285625.0625, "learning_rate": 0.00033119658119658117, "loss": 0.2349, "step": 310 }, { "epoch": 0.9984, "eval_accuracy": 0.76975, "eval_loss": 0.557490348815918, "eval_runtime": 58.1095, 
"eval_samples_per_second": 68.836, "eval_steps_per_second": 2.151, "step": 312 }, { "epoch": 1.024, "grad_norm": 275641.71875, "learning_rate": 0.0003418803418803419, "loss": 0.2282, "step": 320 }, { "epoch": 1.056, "grad_norm": 360517.9375, "learning_rate": 0.0003525641025641026, "loss": 0.2088, "step": 330 }, { "epoch": 1.088, "grad_norm": 243124.90625, "learning_rate": 0.00036324786324786325, "loss": 0.2142, "step": 340 }, { "epoch": 1.12, "grad_norm": 348227.40625, "learning_rate": 0.00037393162393162396, "loss": 0.2073, "step": 350 }, { "epoch": 1.152, "grad_norm": 269863.5625, "learning_rate": 0.00038461538461538467, "loss": 0.2141, "step": 360 }, { "epoch": 1.184, "grad_norm": 244875.484375, "learning_rate": 0.00039529914529914527, "loss": 0.2124, "step": 370 }, { "epoch": 1.216, "grad_norm": 233065.015625, "learning_rate": 0.000405982905982906, "loss": 0.2315, "step": 380 }, { "epoch": 1.248, "grad_norm": 285089.71875, "learning_rate": 0.0004166666666666667, "loss": 0.2135, "step": 390 }, { "epoch": 1.28, "grad_norm": 242519.3125, "learning_rate": 0.00042735042735042735, "loss": 0.2131, "step": 400 }, { "epoch": 1.312, "grad_norm": 337405.78125, "learning_rate": 0.00043803418803418806, "loss": 0.1909, "step": 410 }, { "epoch": 1.3439999999999999, "grad_norm": 197278.3125, "learning_rate": 0.0004487179487179487, "loss": 0.196, "step": 420 }, { "epoch": 1.376, "grad_norm": 295350.5625, "learning_rate": 0.0004594017094017094, "loss": 0.215, "step": 430 }, { "epoch": 1.408, "grad_norm": 200897.625, "learning_rate": 0.0004700854700854701, "loss": 0.2103, "step": 440 }, { "epoch": 1.44, "grad_norm": 304716.84375, "learning_rate": 0.0004807692307692308, "loss": 0.1853, "step": 450 }, { "epoch": 1.472, "grad_norm": 337455.09375, "learning_rate": 0.0004914529914529914, "loss": 0.2001, "step": 460 }, { "epoch": 1.504, "grad_norm": 292779.3125, "learning_rate": 0.0005021367521367521, "loss": 0.1941, "step": 470 }, { "epoch": 1.536, "grad_norm": 280025.1875, 
"learning_rate": 0.0005128205128205128, "loss": 0.2208, "step": 480 }, { "epoch": 1.568, "grad_norm": 309745.53125, "learning_rate": 0.0005235042735042735, "loss": 0.2267, "step": 490 }, { "epoch": 1.6, "grad_norm": 176447.296875, "learning_rate": 0.0005341880341880342, "loss": 0.2137, "step": 500 }, { "epoch": 1.6320000000000001, "grad_norm": 258145.578125, "learning_rate": 0.0005448717948717948, "loss": 0.2039, "step": 510 }, { "epoch": 1.6640000000000001, "grad_norm": 279034.375, "learning_rate": 0.0005555555555555556, "loss": 0.2057, "step": 520 }, { "epoch": 1.696, "grad_norm": 194719.9375, "learning_rate": 0.0005662393162393163, "loss": 0.23, "step": 530 }, { "epoch": 1.728, "grad_norm": 232231.15625, "learning_rate": 0.0005769230769230769, "loss": 0.2142, "step": 540 }, { "epoch": 1.76, "grad_norm": 227828.96875, "learning_rate": 0.0005876068376068377, "loss": 0.2012, "step": 550 }, { "epoch": 1.792, "grad_norm": 260916.8125, "learning_rate": 0.0005982905982905983, "loss": 0.2154, "step": 560 }, { "epoch": 1.8239999999999998, "grad_norm": 332572.875, "learning_rate": 0.0006089743589743589, "loss": 0.2104, "step": 570 }, { "epoch": 1.8559999999999999, "grad_norm": 274341.21875, "learning_rate": 0.0006196581196581197, "loss": 0.1927, "step": 580 }, { "epoch": 1.888, "grad_norm": 221814.578125, "learning_rate": 0.0006303418803418803, "loss": 0.2181, "step": 590 }, { "epoch": 1.92, "grad_norm": 266745.46875, "learning_rate": 0.0006410256410256411, "loss": 0.1901, "step": 600 }, { "epoch": 1.952, "grad_norm": 245638.390625, "learning_rate": 0.0006517094017094017, "loss": 0.2292, "step": 610 }, { "epoch": 1.984, "grad_norm": 305467.09375, "learning_rate": 0.0006623931623931623, "loss": 0.2191, "step": 620 }, { "epoch": 2.0, "eval_accuracy": 0.76175, "eval_loss": 0.5572408437728882, "eval_runtime": 57.8321, "eval_samples_per_second": 69.166, "eval_steps_per_second": 2.161, "step": 625 }, { "epoch": 2.016, "grad_norm": 145681.390625, "learning_rate": 
0.0006730769230769232, "loss": 0.1823, "step": 630 }, { "epoch": 2.048, "grad_norm": 380796.4375, "learning_rate": 0.0006837606837606838, "loss": 0.2308, "step": 640 }, { "epoch": 2.08, "grad_norm": 256831.84375, "learning_rate": 0.0006944444444444445, "loss": 0.2223, "step": 650 }, { "epoch": 2.112, "grad_norm": 241387.109375, "learning_rate": 0.0007051282051282052, "loss": 0.2134, "step": 660 }, { "epoch": 2.144, "grad_norm": 361426.84375, "learning_rate": 0.0007158119658119658, "loss": 0.2055, "step": 670 }, { "epoch": 2.176, "grad_norm": 213368.515625, "learning_rate": 0.0007264957264957265, "loss": 0.2068, "step": 680 }, { "epoch": 2.208, "grad_norm": 351293.4375, "learning_rate": 0.0007371794871794872, "loss": 0.2278, "step": 690 }, { "epoch": 2.24, "grad_norm": 351138.28125, "learning_rate": 0.0007478632478632479, "loss": 0.216, "step": 700 }, { "epoch": 2.2720000000000002, "grad_norm": 281240.5, "learning_rate": 0.0007585470085470085, "loss": 0.2064, "step": 710 }, { "epoch": 2.304, "grad_norm": 273138.125, "learning_rate": 0.0007692307692307693, "loss": 0.2021, "step": 720 }, { "epoch": 2.336, "grad_norm": 198611.640625, "learning_rate": 0.0007799145299145299, "loss": 0.218, "step": 730 }, { "epoch": 2.368, "grad_norm": 334765.21875, "learning_rate": 0.0007905982905982905, "loss": 0.1988, "step": 740 }, { "epoch": 2.4, "grad_norm": 321548.25, "learning_rate": 0.0008012820512820514, "loss": 0.2446, "step": 750 }, { "epoch": 2.432, "grad_norm": 230110.046875, "learning_rate": 0.000811965811965812, "loss": 0.213, "step": 760 }, { "epoch": 2.464, "grad_norm": 216522.890625, "learning_rate": 0.0008226495726495727, "loss": 0.2114, "step": 770 }, { "epoch": 2.496, "grad_norm": 290017.125, "learning_rate": 0.0008333333333333334, "loss": 0.2253, "step": 780 }, { "epoch": 2.528, "grad_norm": 307569.59375, "learning_rate": 0.000844017094017094, "loss": 0.1961, "step": 790 }, { "epoch": 2.56, "grad_norm": 258996.3125, "learning_rate": 0.0008547008547008547, "loss": 
0.2226, "step": 800 }, { "epoch": 2.592, "grad_norm": 187987.453125, "learning_rate": 0.0008653846153846154, "loss": 0.2018, "step": 810 }, { "epoch": 2.624, "grad_norm": 249946.1875, "learning_rate": 0.0008760683760683761, "loss": 0.1907, "step": 820 }, { "epoch": 2.656, "grad_norm": 265943.46875, "learning_rate": 0.0008867521367521367, "loss": 0.2025, "step": 830 }, { "epoch": 2.6879999999999997, "grad_norm": 232717.453125, "learning_rate": 0.0008974358974358974, "loss": 0.2252, "step": 840 }, { "epoch": 2.7199999999999998, "grad_norm": 212033.515625, "learning_rate": 0.0009081196581196581, "loss": 0.2126, "step": 850 }, { "epoch": 2.752, "grad_norm": 233050.203125, "learning_rate": 0.0009188034188034187, "loss": 0.2281, "step": 860 }, { "epoch": 2.784, "grad_norm": 359475.75, "learning_rate": 0.0009294871794871796, "loss": 0.188, "step": 870 }, { "epoch": 2.816, "grad_norm": 343301.15625, "learning_rate": 0.0009401709401709402, "loss": 0.2316, "step": 880 }, { "epoch": 2.848, "grad_norm": 316784.375, "learning_rate": 0.0009508547008547009, "loss": 0.2014, "step": 890 }, { "epoch": 2.88, "grad_norm": 375741.0, "learning_rate": 0.0009615384615384616, "loss": 0.2196, "step": 900 }, { "epoch": 2.912, "grad_norm": 320915.21875, "learning_rate": 0.0009722222222222222, "loss": 0.2092, "step": 910 }, { "epoch": 2.944, "grad_norm": 254477.09375, "learning_rate": 0.0009829059829059828, "loss": 0.2147, "step": 920 }, { "epoch": 2.976, "grad_norm": 256587.71875, "learning_rate": 0.0009935897435897436, "loss": 0.2124, "step": 930 }, { "epoch": 2.9984, "eval_accuracy": 0.769, "eval_loss": 0.5579845309257507, "eval_runtime": 58.375, "eval_samples_per_second": 68.523, "eval_steps_per_second": 2.141, "step": 937 }, { "epoch": 3.008, "grad_norm": 284885.21875, "learning_rate": 0.0009995251661918329, "loss": 0.2, "step": 940 }, { "epoch": 3.04, "grad_norm": 280128.65625, "learning_rate": 0.000998338081671415, "loss": 0.2062, "step": 950 }, { "epoch": 3.072, "grad_norm": 327251.5, 
"learning_rate": 0.0009971509971509972, "loss": 0.2285, "step": 960 }, { "epoch": 3.104, "grad_norm": 246022.15625, "learning_rate": 0.0009959639126305793, "loss": 0.2138, "step": 970 }, { "epoch": 3.136, "grad_norm": 149789.59375, "learning_rate": 0.0009947768281101616, "loss": 0.2108, "step": 980 }, { "epoch": 3.168, "grad_norm": 171794.5625, "learning_rate": 0.0009935897435897436, "loss": 0.2167, "step": 990 }, { "epoch": 3.2, "grad_norm": 227827.90625, "learning_rate": 0.000992402659069326, "loss": 0.2043, "step": 1000 }, { "epoch": 3.232, "grad_norm": 196560.046875, "learning_rate": 0.000991215574548908, "loss": 0.2247, "step": 1010 }, { "epoch": 3.2640000000000002, "grad_norm": 198754.34375, "learning_rate": 0.00099002849002849, "loss": 0.2033, "step": 1020 }, { "epoch": 3.296, "grad_norm": 277303.875, "learning_rate": 0.0009888414055080723, "loss": 0.209, "step": 1030 }, { "epoch": 3.328, "grad_norm": 291565.90625, "learning_rate": 0.0009876543209876543, "loss": 0.2248, "step": 1040 }, { "epoch": 3.36, "grad_norm": 231893.078125, "learning_rate": 0.0009864672364672364, "loss": 0.1879, "step": 1050 }, { "epoch": 3.392, "grad_norm": 160630.546875, "learning_rate": 0.0009852801519468187, "loss": 0.2316, "step": 1060 }, { "epoch": 3.424, "grad_norm": 274359.40625, "learning_rate": 0.0009840930674264007, "loss": 0.2191, "step": 1070 }, { "epoch": 3.456, "grad_norm": 287355.15625, "learning_rate": 0.0009829059829059828, "loss": 0.1981, "step": 1080 }, { "epoch": 3.488, "grad_norm": 286551.75, "learning_rate": 0.000981718898385565, "loss": 0.2253, "step": 1090 }, { "epoch": 3.52, "grad_norm": 293656.21875, "learning_rate": 0.0009805318138651471, "loss": 0.2113, "step": 1100 }, { "epoch": 3.552, "grad_norm": 226414.3125, "learning_rate": 0.0009793447293447294, "loss": 0.2366, "step": 1110 }, { "epoch": 3.584, "grad_norm": 304422.28125, "learning_rate": 0.0009781576448243115, "loss": 0.2296, "step": 1120 }, { "epoch": 3.616, "grad_norm": 200736.75, "learning_rate": 
0.0009769705603038937, "loss": 0.2085, "step": 1130 }, { "epoch": 3.648, "grad_norm": 191285.28125, "learning_rate": 0.0009757834757834758, "loss": 0.2168, "step": 1140 }, { "epoch": 3.68, "grad_norm": 265826.28125, "learning_rate": 0.000974596391263058, "loss": 0.2242, "step": 1150 }, { "epoch": 3.7119999999999997, "grad_norm": 170613.078125, "learning_rate": 0.00097340930674264, "loss": 0.2015, "step": 1160 }, { "epoch": 3.7439999999999998, "grad_norm": 272692.40625, "learning_rate": 0.0009722222222222222, "loss": 0.2162, "step": 1170 }, { "epoch": 3.776, "grad_norm": 245685.46875, "learning_rate": 0.0009710351377018044, "loss": 0.2166, "step": 1180 }, { "epoch": 3.808, "grad_norm": 359136.125, "learning_rate": 0.0009698480531813865, "loss": 0.2023, "step": 1190 }, { "epoch": 3.84, "grad_norm": 273622.90625, "learning_rate": 0.0009686609686609687, "loss": 0.1966, "step": 1200 }, { "epoch": 3.872, "grad_norm": 291427.96875, "learning_rate": 0.0009674738841405509, "loss": 0.216, "step": 1210 }, { "epoch": 3.904, "grad_norm": 328024.90625, "learning_rate": 0.000966286799620133, "loss": 0.1891, "step": 1220 }, { "epoch": 3.936, "grad_norm": 263930.625, "learning_rate": 0.0009650997150997152, "loss": 0.2155, "step": 1230 }, { "epoch": 3.968, "grad_norm": 214059.265625, "learning_rate": 0.0009639126305792973, "loss": 0.232, "step": 1240 }, { "epoch": 4.0, "grad_norm": 204246.984375, "learning_rate": 0.0009627255460588794, "loss": 0.2207, "step": 1250 }, { "epoch": 4.0, "eval_accuracy": 0.763, "eval_loss": 0.5500344038009644, "eval_runtime": 57.7064, "eval_samples_per_second": 69.316, "eval_steps_per_second": 2.166, "step": 1250 }, { "epoch": 4.032, "grad_norm": 295677.8125, "learning_rate": 0.0009615384615384616, "loss": 0.2247, "step": 1260 }, { "epoch": 4.064, "grad_norm": 197746.8125, "learning_rate": 0.0009603513770180438, "loss": 0.2203, "step": 1270 }, { "epoch": 4.096, "grad_norm": 287209.65625, "learning_rate": 0.0009591642924976258, "loss": 0.2201, "step": 
1280 }, { "epoch": 4.128, "grad_norm": 279235.9375, "learning_rate": 0.000957977207977208, "loss": 0.2344, "step": 1290 }, { "epoch": 4.16, "grad_norm": 293263.59375, "learning_rate": 0.0009567901234567902, "loss": 0.2069, "step": 1300 }, { "epoch": 4.192, "grad_norm": 212506.359375, "learning_rate": 0.0009556030389363722, "loss": 0.1787, "step": 1310 }, { "epoch": 4.224, "grad_norm": 274108.71875, "learning_rate": 0.0009544159544159544, "loss": 0.2311, "step": 1320 }, { "epoch": 4.256, "grad_norm": 319637.59375, "learning_rate": 0.0009532288698955365, "loss": 0.2136, "step": 1330 }, { "epoch": 4.288, "grad_norm": 229458.78125, "learning_rate": 0.0009520417853751187, "loss": 0.2019, "step": 1340 }, { "epoch": 4.32, "grad_norm": 307182.09375, "learning_rate": 0.0009508547008547009, "loss": 0.2162, "step": 1350 }, { "epoch": 4.352, "grad_norm": 288039.09375, "learning_rate": 0.000949667616334283, "loss": 0.1945, "step": 1360 }, { "epoch": 4.384, "grad_norm": 253276.65625, "learning_rate": 0.0009484805318138652, "loss": 0.2089, "step": 1370 }, { "epoch": 4.416, "grad_norm": 250874.515625, "learning_rate": 0.0009472934472934474, "loss": 0.2135, "step": 1380 }, { "epoch": 4.448, "grad_norm": 173051.5625, "learning_rate": 0.0009461063627730294, "loss": 0.2005, "step": 1390 }, { "epoch": 4.48, "grad_norm": 198301.75, "learning_rate": 0.0009449192782526116, "loss": 0.2227, "step": 1400 }, { "epoch": 4.5120000000000005, "grad_norm": 252200.671875, "learning_rate": 0.0009437321937321938, "loss": 0.2233, "step": 1410 }, { "epoch": 4.5440000000000005, "grad_norm": 243128.0, "learning_rate": 0.0009425451092117759, "loss": 0.2291, "step": 1420 }, { "epoch": 4.576, "grad_norm": 353883.34375, "learning_rate": 0.000941358024691358, "loss": 0.2207, "step": 1430 }, { "epoch": 4.608, "grad_norm": 223227.578125, "learning_rate": 0.0009401709401709402, "loss": 0.2076, "step": 1440 }, { "epoch": 4.64, "grad_norm": 283902.90625, "learning_rate": 0.0009389838556505223, "loss": 0.1916, 
"step": 1450 }, { "epoch": 4.672, "grad_norm": 301072.40625, "learning_rate": 0.0009377967711301045, "loss": 0.1951, "step": 1460 }, { "epoch": 4.704, "grad_norm": 300642.03125, "learning_rate": 0.0009366096866096866, "loss": 0.2129, "step": 1470 }, { "epoch": 4.736, "grad_norm": 254449.90625, "learning_rate": 0.0009354226020892687, "loss": 0.2043, "step": 1480 }, { "epoch": 4.768, "grad_norm": 278776.9375, "learning_rate": 0.0009342355175688509, "loss": 0.2139, "step": 1490 }, { "epoch": 4.8, "grad_norm": 169466.421875, "learning_rate": 0.0009330484330484332, "loss": 0.1944, "step": 1500 }, { "epoch": 4.832, "grad_norm": 378268.8125, "learning_rate": 0.0009318613485280152, "loss": 0.1903, "step": 1510 }, { "epoch": 4.864, "grad_norm": 232065.859375, "learning_rate": 0.0009306742640075974, "loss": 0.2188, "step": 1520 }, { "epoch": 4.896, "grad_norm": 222731.53125, "learning_rate": 0.0009294871794871796, "loss": 0.2239, "step": 1530 }, { "epoch": 4.928, "grad_norm": 250957.84375, "learning_rate": 0.0009283000949667616, "loss": 0.2051, "step": 1540 }, { "epoch": 4.96, "grad_norm": 265056.0, "learning_rate": 0.0009271130104463438, "loss": 0.1948, "step": 1550 }, { "epoch": 4.992, "grad_norm": 263161.90625, "learning_rate": 0.000925925925925926, "loss": 0.2143, "step": 1560 }, { "epoch": 4.9984, "eval_accuracy": 0.76525, "eval_loss": 0.5575093030929565, "eval_runtime": 58.0778, "eval_samples_per_second": 68.873, "eval_steps_per_second": 2.152, "step": 1562 }, { "epoch": 5.024, "grad_norm": 242776.59375, "learning_rate": 0.0009247388414055081, "loss": 0.2211, "step": 1570 }, { "epoch": 5.056, "grad_norm": 233410.25, "learning_rate": 0.0009235517568850902, "loss": 0.2181, "step": 1580 }, { "epoch": 5.088, "grad_norm": 293507.25, "learning_rate": 0.0009223646723646724, "loss": 0.1975, "step": 1590 }, { "epoch": 5.12, "grad_norm": 228211.375, "learning_rate": 0.0009211775878442545, "loss": 0.2207, "step": 1600 }, { "epoch": 5.152, "grad_norm": 219766.359375, 
"learning_rate": 0.0009199905033238367, "loss": 0.185, "step": 1610 }, { "epoch": 5.184, "grad_norm": 243226.15625, "learning_rate": 0.0009188034188034187, "loss": 0.2023, "step": 1620 }, { "epoch": 5.216, "grad_norm": 227722.0625, "learning_rate": 0.0009176163342830009, "loss": 0.2233, "step": 1630 }, { "epoch": 5.248, "grad_norm": 182398.234375, "learning_rate": 0.0009164292497625831, "loss": 0.2204, "step": 1640 }, { "epoch": 5.28, "grad_norm": 227450.109375, "learning_rate": 0.0009152421652421653, "loss": 0.2206, "step": 1650 }, { "epoch": 5.312, "grad_norm": 416922.8125, "learning_rate": 0.0009140550807217474, "loss": 0.2024, "step": 1660 }, { "epoch": 5.344, "grad_norm": 174068.0, "learning_rate": 0.0009128679962013296, "loss": 0.2166, "step": 1670 }, { "epoch": 5.376, "grad_norm": 295677.71875, "learning_rate": 0.0009116809116809118, "loss": 0.2201, "step": 1680 }, { "epoch": 5.408, "grad_norm": 219667.796875, "learning_rate": 0.0009104938271604939, "loss": 0.1898, "step": 1690 }, { "epoch": 5.44, "grad_norm": 196588.1875, "learning_rate": 0.000909306742640076, "loss": 0.1963, "step": 1700 }, { "epoch": 5.4719999999999995, "grad_norm": 198457.703125, "learning_rate": 0.0009081196581196581, "loss": 0.1988, "step": 1710 }, { "epoch": 5.504, "grad_norm": 177612.46875, "learning_rate": 0.0009069325735992403, "loss": 0.1918, "step": 1720 }, { "epoch": 5.536, "grad_norm": 231351.9375, "learning_rate": 0.0009057454890788225, "loss": 0.2098, "step": 1730 }, { "epoch": 5.568, "grad_norm": 281818.4375, "learning_rate": 0.0009045584045584045, "loss": 0.205, "step": 1740 }, { "epoch": 5.6, "grad_norm": 249959.890625, "learning_rate": 0.0009033713200379867, "loss": 0.1928, "step": 1750 }, { "epoch": 5.632, "grad_norm": 192714.796875, "learning_rate": 0.0009021842355175689, "loss": 0.2063, "step": 1760 }, { "epoch": 5.664, "grad_norm": 244175.8125, "learning_rate": 0.0009009971509971509, "loss": 0.2161, "step": 1770 }, { "epoch": 5.696, "grad_norm": 238826.9375, 
"learning_rate": 0.0008998100664767331, "loss": 0.2042, "step": 1780 }, { "epoch": 5.728, "grad_norm": 174162.5625, "learning_rate": 0.0008986229819563153, "loss": 0.205, "step": 1790 }, { "epoch": 5.76, "grad_norm": 359398.59375, "learning_rate": 0.0008974358974358974, "loss": 0.2137, "step": 1800 }, { "epoch": 5.792, "grad_norm": 324436.6875, "learning_rate": 0.0008962488129154796, "loss": 0.2223, "step": 1810 }, { "epoch": 5.824, "grad_norm": 274255.4375, "learning_rate": 0.0008950617283950618, "loss": 0.2043, "step": 1820 }, { "epoch": 5.856, "grad_norm": 212538.953125, "learning_rate": 0.0008938746438746439, "loss": 0.2054, "step": 1830 }, { "epoch": 5.888, "grad_norm": 170735.9375, "learning_rate": 0.0008926875593542261, "loss": 0.1999, "step": 1840 }, { "epoch": 5.92, "grad_norm": 241050.0625, "learning_rate": 0.0008915004748338082, "loss": 0.2154, "step": 1850 }, { "epoch": 5.952, "grad_norm": 213356.40625, "learning_rate": 0.0008903133903133903, "loss": 0.2068, "step": 1860 }, { "epoch": 5.984, "grad_norm": 252578.8125, "learning_rate": 0.0008891263057929725, "loss": 0.2191, "step": 1870 }, { "epoch": 6.0, "eval_accuracy": 0.77275, "eval_loss": 0.5486457347869873, "eval_runtime": 58.401, "eval_samples_per_second": 68.492, "eval_steps_per_second": 2.14, "step": 1875 }, { "epoch": 6.016, "grad_norm": 195587.765625, "learning_rate": 0.0008879392212725547, "loss": 0.2163, "step": 1880 }, { "epoch": 6.048, "grad_norm": 298880.3125, "learning_rate": 0.0008867521367521367, "loss": 0.2318, "step": 1890 }, { "epoch": 6.08, "grad_norm": 254082.453125, "learning_rate": 0.0008855650522317189, "loss": 0.2296, "step": 1900 }, { "epoch": 6.112, "grad_norm": 209368.6875, "learning_rate": 0.0008843779677113011, "loss": 0.2059, "step": 1910 }, { "epoch": 6.144, "grad_norm": 296605.71875, "learning_rate": 0.0008831908831908832, "loss": 0.1965, "step": 1920 }, { "epoch": 6.176, "grad_norm": 232373.09375, "learning_rate": 0.0008820037986704653, "loss": 0.2062, "step": 1930 }, 
{ "epoch": 6.208, "grad_norm": 212144.140625, "learning_rate": 0.0008808167141500475, "loss": 0.2242, "step": 1940 }, { "epoch": 6.24, "grad_norm": 223206.578125, "learning_rate": 0.0008796296296296296, "loss": 0.2109, "step": 1950 }, { "epoch": 6.272, "grad_norm": 237154.78125, "learning_rate": 0.0008784425451092119, "loss": 0.2167, "step": 1960 }, { "epoch": 6.304, "grad_norm": 185542.953125, "learning_rate": 0.000877255460588794, "loss": 0.2052, "step": 1970 }, { "epoch": 6.336, "grad_norm": 203563.640625, "learning_rate": 0.0008760683760683761, "loss": 0.2096, "step": 1980 }, { "epoch": 6.368, "grad_norm": 285480.0625, "learning_rate": 0.0008748812915479583, "loss": 0.2037, "step": 1990 }, { "epoch": 6.4, "grad_norm": 303778.9375, "learning_rate": 0.0008736942070275405, "loss": 0.1981, "step": 2000 }, { "epoch": 6.432, "grad_norm": 462646.96875, "learning_rate": 0.0008725071225071225, "loss": 0.2092, "step": 2010 }, { "epoch": 6.464, "grad_norm": 461746.4375, "learning_rate": 0.0008713200379867047, "loss": 0.2294, "step": 2020 }, { "epoch": 6.496, "grad_norm": 533448.4375, "learning_rate": 0.0008701329534662869, "loss": 0.2055, "step": 2030 }, { "epoch": 6.5280000000000005, "grad_norm": 548531.5625, "learning_rate": 0.0008689458689458689, "loss": 0.2184, "step": 2040 }, { "epoch": 6.5600000000000005, "grad_norm": 495748.65625, "learning_rate": 0.0008677587844254511, "loss": 0.2184, "step": 2050 }, { "epoch": 6.592, "grad_norm": 543121.0625, "learning_rate": 0.0008665716999050332, "loss": 0.1997, "step": 2060 }, { "epoch": 6.624, "grad_norm": 461117.59375, "learning_rate": 0.0008653846153846154, "loss": 0.2019, "step": 2070 }, { "epoch": 6.656, "grad_norm": 765304.125, "learning_rate": 0.0008641975308641975, "loss": 0.1989, "step": 2080 }, { "epoch": 6.688, "grad_norm": 499025.09375, "learning_rate": 0.0008630104463437796, "loss": 0.2139, "step": 2090 }, { "epoch": 6.72, "grad_norm": 443804.0625, "learning_rate": 0.0008618233618233618, "loss": 0.2212, "step": 
2100 }, { "epoch": 6.752, "grad_norm": 513447.84375, "learning_rate": 0.0008606362773029441, "loss": 0.2014, "step": 2110 }, { "epoch": 6.784, "grad_norm": 417790.125, "learning_rate": 0.0008594491927825261, "loss": 0.2092, "step": 2120 }, { "epoch": 6.816, "grad_norm": 580280.5625, "learning_rate": 0.0008582621082621083, "loss": 0.2185, "step": 2130 }, { "epoch": 6.848, "grad_norm": 553312.1875, "learning_rate": 0.0008570750237416905, "loss": 0.2094, "step": 2140 }, { "epoch": 6.88, "grad_norm": 498577.25, "learning_rate": 0.0008558879392212726, "loss": 0.214, "step": 2150 }, { "epoch": 6.912, "grad_norm": 698254.125, "learning_rate": 0.0008547008547008547, "loss": 0.2407, "step": 2160 }, { "epoch": 6.944, "grad_norm": 423589.03125, "learning_rate": 0.0008535137701804369, "loss": 0.2203, "step": 2170 }, { "epoch": 6.976, "grad_norm": 755190.0625, "learning_rate": 0.000852326685660019, "loss": 0.2063, "step": 2180 }, { "epoch": 6.9984, "eval_accuracy": 0.7615, "eval_loss": 0.5593643188476562, "eval_runtime": 58.3849, "eval_samples_per_second": 68.511, "eval_steps_per_second": 2.141, "step": 2187 }, { "epoch": 7.008, "grad_norm": 672768.3125, "learning_rate": 0.0008511396011396012, "loss": 0.2376, "step": 2190 }, { "epoch": 7.04, "grad_norm": 590710.6875, "learning_rate": 0.0008499525166191833, "loss": 0.2167, "step": 2200 }, { "epoch": 7.072, "grad_norm": 451509.625, "learning_rate": 0.0008487654320987654, "loss": 0.1995, "step": 2210 }, { "epoch": 7.104, "grad_norm": 438460.8125, "learning_rate": 0.0008475783475783476, "loss": 0.2262, "step": 2220 }, { "epoch": 7.136, "grad_norm": 538988.8125, "learning_rate": 0.0008463912630579298, "loss": 0.2178, "step": 2230 }, { "epoch": 7.168, "grad_norm": 525871.25, "learning_rate": 0.0008452041785375118, "loss": 0.1961, "step": 2240 }, { "epoch": 7.2, "grad_norm": 420527.3125, "learning_rate": 0.000844017094017094, "loss": 0.2175, "step": 2250 }, { "epoch": 7.232, "grad_norm": 813415.3125, "learning_rate": 
0.0008428300094966762, "loss": 0.2185, "step": 2260 }, { "epoch": 7.264, "grad_norm": 471922.9375, "learning_rate": 0.0008416429249762583, "loss": 0.2074, "step": 2270 }, { "epoch": 7.296, "grad_norm": 451017.125, "learning_rate": 0.0008404558404558405, "loss": 0.1841, "step": 2280 }, { "epoch": 7.328, "grad_norm": 506415.46875, "learning_rate": 0.0008392687559354227, "loss": 0.1943, "step": 2290 }, { "epoch": 7.36, "grad_norm": 444371.15625, "learning_rate": 0.0008380816714150048, "loss": 0.217, "step": 2300 }, { "epoch": 7.392, "grad_norm": 377225.34375, "learning_rate": 0.0008368945868945869, "loss": 0.1975, "step": 2310 }, { "epoch": 7.424, "grad_norm": 594714.4375, "learning_rate": 0.000835707502374169, "loss": 0.1969, "step": 2320 }, { "epoch": 7.456, "grad_norm": 500928.84375, "learning_rate": 0.0008345204178537512, "loss": 0.2038, "step": 2330 }, { "epoch": 7.4879999999999995, "grad_norm": 460287.4375, "learning_rate": 0.0008333333333333334, "loss": 0.2156, "step": 2340 }, { "epoch": 7.52, "grad_norm": 448026.53125, "learning_rate": 0.0008321462488129154, "loss": 0.207, "step": 2350 }, { "epoch": 7.552, "grad_norm": 556125.625, "learning_rate": 0.0008309591642924976, "loss": 0.232, "step": 2360 }, { "epoch": 7.584, "grad_norm": 332308.3125, "learning_rate": 0.0008297720797720798, "loss": 0.2147, "step": 2370 }, { "epoch": 7.616, "grad_norm": 400010.5625, "learning_rate": 0.000828584995251662, "loss": 0.2073, "step": 2380 }, { "epoch": 7.648, "grad_norm": 467695.78125, "learning_rate": 0.000827397910731244, "loss": 0.2018, "step": 2390 }, { "epoch": 7.68, "grad_norm": 446946.625, "learning_rate": 0.0008262108262108262, "loss": 0.2226, "step": 2400 }, { "epoch": 7.712, "grad_norm": 557084.5, "learning_rate": 0.0008250237416904083, "loss": 0.2278, "step": 2410 }, { "epoch": 7.744, "grad_norm": 429602.84375, "learning_rate": 0.0008238366571699906, "loss": 0.1832, "step": 2420 }, { "epoch": 7.776, "grad_norm": 388821.59375, "learning_rate": 
0.0008226495726495727, "loss": 0.2202, "step": 2430 }, { "epoch": 7.808, "grad_norm": 681185.4375, "learning_rate": 0.0008214624881291548, "loss": 0.2078, "step": 2440 }, { "epoch": 7.84, "grad_norm": 763735.875, "learning_rate": 0.000820275403608737, "loss": 0.2242, "step": 2450 }, { "epoch": 7.872, "grad_norm": 565001.625, "learning_rate": 0.0008190883190883192, "loss": 0.2186, "step": 2460 }, { "epoch": 7.904, "grad_norm": 458239.75, "learning_rate": 0.0008179012345679012, "loss": 0.1843, "step": 2470 }, { "epoch": 7.936, "grad_norm": 609510.125, "learning_rate": 0.0008167141500474834, "loss": 0.1953, "step": 2480 }, { "epoch": 7.968, "grad_norm": 578130.5625, "learning_rate": 0.0008155270655270656, "loss": 0.2354, "step": 2490 }, { "epoch": 8.0, "grad_norm": 743331.375, "learning_rate": 0.0008143399810066477, "loss": 0.207, "step": 2500 }, { "epoch": 8.0, "eval_accuracy": 0.7695, "eval_loss": 0.540494441986084, "eval_runtime": 58.3615, "eval_samples_per_second": 68.538, "eval_steps_per_second": 2.142, "step": 2500 }, { "epoch": 8.032, "grad_norm": 439252.84375, "learning_rate": 0.0008131528964862298, "loss": 0.1953, "step": 2510 }, { "epoch": 8.064, "grad_norm": 574105.1875, "learning_rate": 0.000811965811965812, "loss": 0.2288, "step": 2520 }, { "epoch": 8.096, "grad_norm": 392970.59375, "learning_rate": 0.0008107787274453941, "loss": 0.1998, "step": 2530 }, { "epoch": 8.128, "grad_norm": 415349.78125, "learning_rate": 0.0008095916429249762, "loss": 0.1898, "step": 2540 }, { "epoch": 8.16, "grad_norm": 402935.21875, "learning_rate": 0.0008084045584045584, "loss": 0.209, "step": 2550 }, { "epoch": 8.192, "grad_norm": 710684.375, "learning_rate": 0.0008072174738841405, "loss": 0.2232, "step": 2560 }, { "epoch": 8.224, "grad_norm": 522874.5625, "learning_rate": 0.0008060303893637228, "loss": 0.2241, "step": 2570 }, { "epoch": 8.256, "grad_norm": 469906.8125, "learning_rate": 0.0008048433048433049, "loss": 0.2161, "step": 2580 }, { "epoch": 8.288, "grad_norm": 
496419.34375, "learning_rate": 0.000803656220322887, "loss": 0.2131, "step": 2590 }, { "epoch": 8.32, "grad_norm": 641309.9375, "learning_rate": 0.0008024691358024692, "loss": 0.2189, "step": 2600 }, { "epoch": 8.352, "grad_norm": 543925.3125, "learning_rate": 0.0008012820512820514, "loss": 0.2152, "step": 2610 }, { "epoch": 8.384, "grad_norm": 412617.3125, "learning_rate": 0.0008000949667616334, "loss": 0.1995, "step": 2620 }, { "epoch": 8.416, "grad_norm": 595042.1875, "learning_rate": 0.0007989078822412156, "loss": 0.2212, "step": 2630 }, { "epoch": 8.448, "grad_norm": 439380.8125, "learning_rate": 0.0007977207977207978, "loss": 0.2038, "step": 2640 }, { "epoch": 8.48, "grad_norm": 476969.78125, "learning_rate": 0.0007965337132003799, "loss": 0.2097, "step": 2650 }, { "epoch": 8.512, "grad_norm": 371208.65625, "learning_rate": 0.000795346628679962, "loss": 0.2195, "step": 2660 }, { "epoch": 8.544, "grad_norm": 363928.875, "learning_rate": 0.0007941595441595441, "loss": 0.2062, "step": 2670 }, { "epoch": 8.576, "grad_norm": 482233.5625, "learning_rate": 0.0007929724596391263, "loss": 0.2212, "step": 2680 }, { "epoch": 8.608, "grad_norm": 675145.5625, "learning_rate": 0.0007917853751187085, "loss": 0.2247, "step": 2690 }, { "epoch": 8.64, "grad_norm": 385214.0625, "learning_rate": 0.0007905982905982905, "loss": 0.2006, "step": 2700 }, { "epoch": 8.672, "grad_norm": 552452.0625, "learning_rate": 0.0007894112060778727, "loss": 0.2046, "step": 2710 }, { "epoch": 8.704, "grad_norm": 638284.4375, "learning_rate": 0.0007882241215574549, "loss": 0.2222, "step": 2720 }, { "epoch": 8.736, "grad_norm": 314355.84375, "learning_rate": 0.0007870370370370372, "loss": 0.2059, "step": 2730 }, { "epoch": 8.768, "grad_norm": 499934.5625, "learning_rate": 0.0007858499525166192, "loss": 0.2003, "step": 2740 }, { "epoch": 8.8, "grad_norm": 514626.78125, "learning_rate": 0.0007846628679962014, "loss": 0.2202, "step": 2750 }, { "epoch": 8.832, "grad_norm": 743981.875, "learning_rate": 
0.0007834757834757835, "loss": 0.223, "step": 2760 }, { "epoch": 8.864, "grad_norm": 372136.40625, "learning_rate": 0.0007822886989553656, "loss": 0.205, "step": 2770 }, { "epoch": 8.896, "grad_norm": 524279.8125, "learning_rate": 0.0007811016144349478, "loss": 0.2241, "step": 2780 }, { "epoch": 8.928, "grad_norm": 526485.4375, "learning_rate": 0.0007799145299145299, "loss": 0.2046, "step": 2790 }, { "epoch": 8.96, "grad_norm": 388172.5625, "learning_rate": 0.0007787274453941121, "loss": 0.2174, "step": 2800 }, { "epoch": 8.992, "grad_norm": 657450.8125, "learning_rate": 0.0007775403608736942, "loss": 0.2273, "step": 2810 }, { "epoch": 8.9984, "eval_accuracy": 0.76725, "eval_loss": 0.5568080544471741, "eval_runtime": 58.1506, "eval_samples_per_second": 68.787, "eval_steps_per_second": 2.15, "step": 2812 }, { "epoch": 9.024, "grad_norm": 381848.6875, "learning_rate": 0.0007763532763532763, "loss": 0.1682, "step": 2820 }, { "epoch": 9.056, "grad_norm": 488400.375, "learning_rate": 0.0007751661918328585, "loss": 0.2072, "step": 2830 }, { "epoch": 9.088, "grad_norm": 557510.875, "learning_rate": 0.0007739791073124407, "loss": 0.2127, "step": 2840 }, { "epoch": 9.12, "grad_norm": 689299.9375, "learning_rate": 0.0007727920227920227, "loss": 0.2191, "step": 2850 }, { "epoch": 9.152, "grad_norm": 543505.4375, "learning_rate": 0.0007716049382716049, "loss": 0.2011, "step": 2860 }, { "epoch": 9.184, "grad_norm": 514595.4375, "learning_rate": 0.0007704178537511871, "loss": 0.1982, "step": 2870 }, { "epoch": 9.216, "grad_norm": 489803.34375, "learning_rate": 0.0007692307692307693, "loss": 0.2097, "step": 2880 }, { "epoch": 9.248, "grad_norm": 415434.8125, "learning_rate": 0.0007680436847103514, "loss": 0.1999, "step": 2890 }, { "epoch": 9.28, "grad_norm": 405549.75, "learning_rate": 0.0007668566001899336, "loss": 0.227, "step": 2900 }, { "epoch": 9.312, "grad_norm": 474433.0625, "learning_rate": 0.0007656695156695157, "loss": 0.2027, "step": 2910 }, { "epoch": 9.344, 
"grad_norm": 332759.625, "learning_rate": 0.0007644824311490979, "loss": 0.208, "step": 2920 }, { "epoch": 9.376, "grad_norm": 523231.1875, "learning_rate": 0.00076329534662868, "loss": 0.2236, "step": 2930 }, { "epoch": 9.408, "grad_norm": 756157.0625, "learning_rate": 0.0007621082621082621, "loss": 0.2244, "step": 2940 }, { "epoch": 9.44, "grad_norm": 666299.875, "learning_rate": 0.0007609211775878443, "loss": 0.2434, "step": 2950 }, { "epoch": 9.472, "grad_norm": 387654.9375, "learning_rate": 0.0007597340930674265, "loss": 0.1967, "step": 2960 }, { "epoch": 9.504, "grad_norm": 641448.8125, "learning_rate": 0.0007585470085470085, "loss": 0.2211, "step": 2970 }, { "epoch": 9.536, "grad_norm": 626212.25, "learning_rate": 0.0007573599240265907, "loss": 0.2041, "step": 2980 }, { "epoch": 9.568, "grad_norm": 473671.46875, "learning_rate": 0.0007561728395061729, "loss": 0.2098, "step": 2990 }, { "epoch": 9.6, "grad_norm": 595403.9375, "learning_rate": 0.0007549857549857549, "loss": 0.2197, "step": 3000 }, { "epoch": 9.632, "grad_norm": 495355.15625, "learning_rate": 0.0007537986704653371, "loss": 0.2275, "step": 3010 }, { "epoch": 9.664, "grad_norm": 433815.03125, "learning_rate": 0.0007526115859449192, "loss": 0.2411, "step": 3020 }, { "epoch": 9.696, "grad_norm": 386762.625, "learning_rate": 0.0007514245014245015, "loss": 0.1899, "step": 3030 }, { "epoch": 9.728, "grad_norm": 407145.6875, "learning_rate": 0.0007502374169040836, "loss": 0.2029, "step": 3040 }, { "epoch": 9.76, "grad_norm": 573868.8125, "learning_rate": 0.0007490503323836657, "loss": 0.2023, "step": 3050 }, { "epoch": 9.792, "grad_norm": 457500.875, "learning_rate": 0.0007478632478632479, "loss": 0.2094, "step": 3060 }, { "epoch": 9.824, "grad_norm": 477359.5, "learning_rate": 0.0007466761633428301, "loss": 0.2024, "step": 3070 }, { "epoch": 9.856, "grad_norm": 406626.96875, "learning_rate": 0.0007454890788224121, "loss": 0.2074, "step": 3080 }, { "epoch": 9.888, "grad_norm": 440002.4375, 
"learning_rate": 0.0007443019943019943, "loss": 0.1979, "step": 3090 }, { "epoch": 9.92, "grad_norm": 465033.5625, "learning_rate": 0.0007431149097815765, "loss": 0.2019, "step": 3100 }, { "epoch": 9.952, "grad_norm": 627469.5625, "learning_rate": 0.0007419278252611586, "loss": 0.2066, "step": 3110 }, { "epoch": 9.984, "grad_norm": 495403.3125, "learning_rate": 0.0007407407407407407, "loss": 0.2136, "step": 3120 }, { "epoch": 10.0, "eval_accuracy": 0.77275, "eval_loss": 0.5483222007751465, "eval_runtime": 58.3061, "eval_samples_per_second": 68.603, "eval_steps_per_second": 2.144, "step": 3125 }, { "epoch": 10.016, "grad_norm": 450484.65625, "learning_rate": 0.0007395536562203229, "loss": 0.2118, "step": 3130 }, { "epoch": 10.048, "grad_norm": 329785.25, "learning_rate": 0.000738366571699905, "loss": 0.2155, "step": 3140 }, { "epoch": 10.08, "grad_norm": 587364.0, "learning_rate": 0.0007371794871794872, "loss": 0.2, "step": 3150 }, { "epoch": 10.112, "grad_norm": 518025.4375, "learning_rate": 0.0007359924026590693, "loss": 0.2157, "step": 3160 }, { "epoch": 10.144, "grad_norm": 416655.78125, "learning_rate": 0.0007348053181386514, "loss": 0.1865, "step": 3170 }, { "epoch": 10.176, "grad_norm": 570281.9375, "learning_rate": 0.0007336182336182337, "loss": 0.2008, "step": 3180 }, { "epoch": 10.208, "grad_norm": 401606.65625, "learning_rate": 0.0007324311490978159, "loss": 0.1793, "step": 3190 }, { "epoch": 10.24, "grad_norm": 859104.125, "learning_rate": 0.0007312440645773979, "loss": 0.2221, "step": 3200 }, { "epoch": 10.272, "grad_norm": 652583.1875, "learning_rate": 0.0007300569800569801, "loss": 0.2338, "step": 3210 }, { "epoch": 10.304, "grad_norm": 495821.96875, "learning_rate": 0.0007288698955365623, "loss": 0.1917, "step": 3220 }, { "epoch": 10.336, "grad_norm": 357223.5, "learning_rate": 0.0007276828110161444, "loss": 0.1903, "step": 3230 }, { "epoch": 10.368, "grad_norm": 603392.0, "learning_rate": 0.0007264957264957265, "loss": 0.2161, "step": 3240 }, { 
"epoch": 10.4, "grad_norm": 329822.15625, "learning_rate": 0.0007253086419753087, "loss": 0.1862, "step": 3250 }, { "epoch": 10.432, "grad_norm": 600281.1875, "learning_rate": 0.0007241215574548908, "loss": 0.2185, "step": 3260 }, { "epoch": 10.464, "grad_norm": 553877.1875, "learning_rate": 0.0007229344729344729, "loss": 0.2306, "step": 3270 }, { "epoch": 10.496, "grad_norm": 571845.375, "learning_rate": 0.0007217473884140551, "loss": 0.2141, "step": 3280 }, { "epoch": 10.528, "grad_norm": 544081.0, "learning_rate": 0.0007205603038936372, "loss": 0.1899, "step": 3290 }, { "epoch": 10.56, "grad_norm": 331369.15625, "learning_rate": 0.0007193732193732194, "loss": 0.2077, "step": 3300 }, { "epoch": 10.592, "grad_norm": 647373.4375, "learning_rate": 0.0007181861348528014, "loss": 0.2368, "step": 3310 }, { "epoch": 10.624, "grad_norm": 522459.0625, "learning_rate": 0.0007169990503323836, "loss": 0.2045, "step": 3320 }, { "epoch": 10.656, "grad_norm": 426327.875, "learning_rate": 0.0007158119658119658, "loss": 0.2038, "step": 3330 }, { "epoch": 10.688, "grad_norm": 511659.5, "learning_rate": 0.0007146248812915481, "loss": 0.2097, "step": 3340 }, { "epoch": 10.72, "grad_norm": 475094.4375, "learning_rate": 0.0007134377967711301, "loss": 0.2063, "step": 3350 }, { "epoch": 10.752, "grad_norm": 652052.625, "learning_rate": 0.0007122507122507123, "loss": 0.2022, "step": 3360 }, { "epoch": 10.784, "grad_norm": 462668.75, "learning_rate": 0.0007110636277302945, "loss": 0.2168, "step": 3370 }, { "epoch": 10.816, "grad_norm": 405784.875, "learning_rate": 0.0007098765432098766, "loss": 0.2003, "step": 3380 }, { "epoch": 10.848, "grad_norm": 721382.25, "learning_rate": 0.0007086894586894587, "loss": 0.2271, "step": 3390 }, { "epoch": 10.88, "grad_norm": 494971.40625, "learning_rate": 0.0007075023741690408, "loss": 0.2471, "step": 3400 }, { "epoch": 10.912, "grad_norm": 414839.5625, "learning_rate": 0.000706315289648623, "loss": 0.2309, "step": 3410 }, { "epoch": 
10.943999999999999, "grad_norm": 496315.90625, "learning_rate": 0.0007051282051282052, "loss": 0.2112, "step": 3420 }, { "epoch": 10.975999999999999, "grad_norm": 473891.03125, "learning_rate": 0.0007039411206077872, "loss": 0.2184, "step": 3430 }, { "epoch": 10.9984, "eval_accuracy": 0.7665, "eval_loss": 0.5606116652488708, "eval_runtime": 58.0847, "eval_samples_per_second": 68.865, "eval_steps_per_second": 2.152, "step": 3437 }, { "epoch": 11.008, "grad_norm": 612277.8125, "learning_rate": 0.0007027540360873694, "loss": 0.2077, "step": 3440 }, { "epoch": 11.04, "grad_norm": 470802.4375, "learning_rate": 0.0007015669515669516, "loss": 0.212, "step": 3450 }, { "epoch": 11.072, "grad_norm": 446870.0, "learning_rate": 0.0007003798670465337, "loss": 0.2173, "step": 3460 }, { "epoch": 11.104, "grad_norm": 309706.625, "learning_rate": 0.0006991927825261158, "loss": 0.2054, "step": 3470 }, { "epoch": 11.136, "grad_norm": 427292.09375, "learning_rate": 0.000698005698005698, "loss": 0.1961, "step": 3480 }, { "epoch": 11.168, "grad_norm": 650260.3125, "learning_rate": 0.0006968186134852802, "loss": 0.2146, "step": 3490 }, { "epoch": 11.2, "grad_norm": 416737.78125, "learning_rate": 0.0006956315289648623, "loss": 0.1953, "step": 3500 }, { "epoch": 11.232, "grad_norm": 568974.875, "learning_rate": 0.0006944444444444445, "loss": 0.1921, "step": 3510 }, { "epoch": 11.264, "grad_norm": 630401.9375, "learning_rate": 0.0006932573599240266, "loss": 0.2157, "step": 3520 }, { "epoch": 11.296, "grad_norm": 449739.40625, "learning_rate": 0.0006920702754036088, "loss": 0.2191, "step": 3530 }, { "epoch": 11.328, "grad_norm": 504943.21875, "learning_rate": 0.0006908831908831909, "loss": 0.2161, "step": 3540 }, { "epoch": 11.36, "grad_norm": 621631.25, "learning_rate": 0.000689696106362773, "loss": 0.2137, "step": 3550 }, { "epoch": 11.392, "grad_norm": 474948.28125, "learning_rate": 0.0006885090218423552, "loss": 0.2206, "step": 3560 }, { "epoch": 11.424, "grad_norm": 534952.875, 
"learning_rate": 0.0006873219373219374, "loss": 0.2098, "step": 3570 }, { "epoch": 11.456, "grad_norm": 449058.09375, "learning_rate": 0.0006861348528015194, "loss": 0.2134, "step": 3580 }, { "epoch": 11.488, "grad_norm": 601122.25, "learning_rate": 0.0006849477682811016, "loss": 0.2208, "step": 3590 }, { "epoch": 11.52, "grad_norm": 363794.46875, "learning_rate": 0.0006837606837606838, "loss": 0.2073, "step": 3600 }, { "epoch": 11.552, "grad_norm": 483605.5, "learning_rate": 0.0006825735992402659, "loss": 0.2195, "step": 3610 }, { "epoch": 11.584, "grad_norm": 533998.8125, "learning_rate": 0.000681386514719848, "loss": 0.2024, "step": 3620 }, { "epoch": 11.616, "grad_norm": 435305.09375, "learning_rate": 0.0006801994301994302, "loss": 0.2183, "step": 3630 }, { "epoch": 11.648, "grad_norm": 567111.9375, "learning_rate": 0.0006790123456790124, "loss": 0.1902, "step": 3640 }, { "epoch": 11.68, "grad_norm": 405747.65625, "learning_rate": 0.0006778252611585946, "loss": 0.2238, "step": 3650 }, { "epoch": 11.712, "grad_norm": 575645.875, "learning_rate": 0.0006766381766381767, "loss": 0.2022, "step": 3660 }, { "epoch": 11.744, "grad_norm": 559379.5, "learning_rate": 0.0006754510921177588, "loss": 0.23, "step": 3670 }, { "epoch": 11.776, "grad_norm": 496193.0, "learning_rate": 0.000674264007597341, "loss": 0.2015, "step": 3680 }, { "epoch": 11.808, "grad_norm": 470825.0, "learning_rate": 0.0006730769230769232, "loss": 0.2249, "step": 3690 }, { "epoch": 11.84, "grad_norm": 507620.0, "learning_rate": 0.0006718898385565052, "loss": 0.2111, "step": 3700 }, { "epoch": 11.872, "grad_norm": 468785.75, "learning_rate": 0.0006707027540360874, "loss": 0.2051, "step": 3710 }, { "epoch": 11.904, "grad_norm": 522254.5, "learning_rate": 0.0006695156695156696, "loss": 0.2035, "step": 3720 }, { "epoch": 11.936, "grad_norm": 453727.625, "learning_rate": 0.0006683285849952516, "loss": 0.2061, "step": 3730 }, { "epoch": 11.968, "grad_norm": 518330.09375, "learning_rate": 
0.0006671415004748338, "loss": 0.215, "step": 3740 }, { "epoch": 12.0, "grad_norm": 498891.5, "learning_rate": 0.0006659544159544159, "loss": 0.212, "step": 3750 }, { "epoch": 12.0, "eval_accuracy": 0.761, "eval_loss": 0.5577554106712341, "eval_runtime": 58.301, "eval_samples_per_second": 68.609, "eval_steps_per_second": 2.144, "step": 3750 }, { "epoch": 12.032, "grad_norm": 663838.375, "learning_rate": 0.0006647673314339981, "loss": 0.2126, "step": 3760 }, { "epoch": 12.064, "grad_norm": 532076.6875, "learning_rate": 0.0006635802469135802, "loss": 0.2208, "step": 3770 }, { "epoch": 12.096, "grad_norm": 609268.625, "learning_rate": 0.0006623931623931623, "loss": 0.1939, "step": 3780 }, { "epoch": 12.128, "grad_norm": 383783.15625, "learning_rate": 0.0006612060778727446, "loss": 0.2174, "step": 3790 }, { "epoch": 12.16, "grad_norm": 483893.375, "learning_rate": 0.0006600189933523268, "loss": 0.2083, "step": 3800 }, { "epoch": 12.192, "grad_norm": 527741.625, "learning_rate": 0.0006588319088319088, "loss": 0.1918, "step": 3810 }, { "epoch": 12.224, "grad_norm": 472716.28125, "learning_rate": 0.000657644824311491, "loss": 0.2262, "step": 3820 }, { "epoch": 12.256, "grad_norm": 521136.75, "learning_rate": 0.0006564577397910732, "loss": 0.2014, "step": 3830 }, { "epoch": 12.288, "grad_norm": 350996.34375, "learning_rate": 0.0006552706552706553, "loss": 0.2096, "step": 3840 }, { "epoch": 12.32, "grad_norm": 372622.5625, "learning_rate": 0.0006540835707502374, "loss": 0.2027, "step": 3850 }, { "epoch": 12.352, "grad_norm": 639510.3125, "learning_rate": 0.0006528964862298196, "loss": 0.2272, "step": 3860 }, { "epoch": 12.384, "grad_norm": 417869.4375, "learning_rate": 0.0006517094017094017, "loss": 0.1993, "step": 3870 }, { "epoch": 12.416, "grad_norm": 666939.6875, "learning_rate": 0.0006505223171889839, "loss": 0.2285, "step": 3880 }, { "epoch": 12.448, "grad_norm": 392265.34375, "learning_rate": 0.000649335232668566, "loss": 0.2296, "step": 3890 }, { "epoch": 12.48, 
"grad_norm": 490617.21875, "learning_rate": 0.0006481481481481481, "loss": 0.2158, "step": 3900 }, { "epoch": 12.512, "grad_norm": 432442.40625, "learning_rate": 0.0006469610636277303, "loss": 0.206, "step": 3910 }, { "epoch": 12.544, "grad_norm": 562610.0, "learning_rate": 0.0006457739791073125, "loss": 0.2248, "step": 3920 }, { "epoch": 12.576, "grad_norm": 646817.1875, "learning_rate": 0.0006445868945868945, "loss": 0.2283, "step": 3930 }, { "epoch": 12.608, "grad_norm": 499516.1875, "learning_rate": 0.0006433998100664767, "loss": 0.2219, "step": 3940 }, { "epoch": 12.64, "grad_norm": 319734.6875, "learning_rate": 0.000642212725546059, "loss": 0.1956, "step": 3950 }, { "epoch": 12.672, "grad_norm": 598636.3125, "learning_rate": 0.0006410256410256411, "loss": 0.2137, "step": 3960 }, { "epoch": 12.704, "grad_norm": 490632.96875, "learning_rate": 0.0006398385565052232, "loss": 0.2297, "step": 3970 }, { "epoch": 12.736, "grad_norm": 1125840.875, "learning_rate": 0.0006386514719848054, "loss": 0.2152, "step": 3980 }, { "epoch": 12.768, "grad_norm": 550740.3125, "learning_rate": 0.0006374643874643875, "loss": 0.2294, "step": 3990 }, { "epoch": 12.8, "grad_norm": 529733.125, "learning_rate": 0.0006362773029439696, "loss": 0.2093, "step": 4000 }, { "epoch": 12.832, "grad_norm": 1042909.75, "learning_rate": 0.0006350902184235518, "loss": 0.1975, "step": 4010 }, { "epoch": 12.864, "grad_norm": 885337.75, "learning_rate": 0.0006339031339031339, "loss": 0.2048, "step": 4020 }, { "epoch": 12.896, "grad_norm": 1166082.375, "learning_rate": 0.0006327160493827161, "loss": 0.2052, "step": 4030 }, { "epoch": 12.928, "grad_norm": 719802.6875, "learning_rate": 0.0006315289648622981, "loss": 0.2104, "step": 4040 }, { "epoch": 12.96, "grad_norm": 1076805.5, "learning_rate": 0.0006303418803418803, "loss": 0.2183, "step": 4050 }, { "epoch": 12.992, "grad_norm": 852746.0, "learning_rate": 0.0006291547958214625, "loss": 0.1903, "step": 4060 }, { "epoch": 12.9984, "eval_accuracy": 0.769, 
"eval_loss": 0.5370960235595703, "eval_runtime": 58.6162, "eval_samples_per_second": 68.241, "eval_steps_per_second": 2.133, "step": 4062 }, { "epoch": 13.024, "grad_norm": 654611.75, "learning_rate": 0.0006279677113010446, "loss": 0.2046, "step": 4070 }, { "epoch": 13.056, "grad_norm": 891781.0625, "learning_rate": 0.0006267806267806267, "loss": 0.2293, "step": 4080 }, { "epoch": 13.088, "grad_norm": 1099780.75, "learning_rate": 0.0006255935422602089, "loss": 0.2096, "step": 4090 }, { "epoch": 13.12, "grad_norm": 1592056.875, "learning_rate": 0.0006244064577397912, "loss": 0.2054, "step": 4100 }, { "epoch": 13.152, "grad_norm": 1195057.75, "learning_rate": 0.0006232193732193733, "loss": 0.215, "step": 4110 }, { "epoch": 13.184, "grad_norm": 864565.3125, "learning_rate": 0.0006220322886989554, "loss": 0.1961, "step": 4120 }, { "epoch": 13.216, "grad_norm": 1274778.875, "learning_rate": 0.0006208452041785375, "loss": 0.21, "step": 4130 }, { "epoch": 13.248, "grad_norm": 1391209.125, "learning_rate": 0.0006196581196581197, "loss": 0.2291, "step": 4140 }, { "epoch": 13.28, "grad_norm": 1173761.25, "learning_rate": 0.0006184710351377019, "loss": 0.2186, "step": 4150 }, { "epoch": 13.312, "grad_norm": 1076280.625, "learning_rate": 0.0006172839506172839, "loss": 0.2193, "step": 4160 }, { "epoch": 13.344, "grad_norm": 809341.9375, "learning_rate": 0.0006160968660968661, "loss": 0.1914, "step": 4170 }, { "epoch": 13.376, "grad_norm": 1084379.0, "learning_rate": 0.0006149097815764483, "loss": 0.2064, "step": 4180 }, { "epoch": 13.408, "grad_norm": 833949.4375, "learning_rate": 0.0006137226970560304, "loss": 0.2139, "step": 4190 }, { "epoch": 13.44, "grad_norm": 1337724.25, "learning_rate": 0.0006125356125356125, "loss": 0.1961, "step": 4200 }, { "epoch": 13.472, "grad_norm": 964052.1875, "learning_rate": 0.0006113485280151947, "loss": 0.2057, "step": 4210 }, { "epoch": 13.504, "grad_norm": 1420036.875, "learning_rate": 0.0006101614434947768, "loss": 0.2339, "step": 4220 }, 
{ "epoch": 13.536, "grad_norm": 1049277.375, "learning_rate": 0.0006089743589743589, "loss": 0.1966, "step": 4230 }, { "epoch": 13.568, "grad_norm": 1342290.875, "learning_rate": 0.0006077872744539411, "loss": 0.2193, "step": 4240 }, { "epoch": 13.6, "grad_norm": 1058454.25, "learning_rate": 0.0006066001899335233, "loss": 0.2211, "step": 4250 }, { "epoch": 13.632, "grad_norm": 802834.5, "learning_rate": 0.0006054131054131055, "loss": 0.206, "step": 4260 }, { "epoch": 13.664, "grad_norm": 939160.625, "learning_rate": 0.0006042260208926876, "loss": 0.2077, "step": 4270 }, { "epoch": 13.696, "grad_norm": 1321520.75, "learning_rate": 0.0006030389363722697, "loss": 0.2347, "step": 4280 }, { "epoch": 13.728, "grad_norm": null, "learning_rate": 0.0006018518518518519, "loss": 0.2057, "step": 4290 }, { "epoch": 13.76, "grad_norm": 1250876.625, "learning_rate": 0.0006006647673314341, "loss": 0.2216, "step": 4300 }, { "epoch": 13.792, "grad_norm": 1065398.25, "learning_rate": 0.0005994776828110161, "loss": 0.2212, "step": 4310 }, { "epoch": 13.824, "grad_norm": 923487.375, "learning_rate": 0.0005982905982905983, "loss": 0.2153, "step": 4320 }, { "epoch": 13.856, "grad_norm": 951445.8125, "learning_rate": 0.0005971035137701805, "loss": 0.2128, "step": 4330 }, { "epoch": 13.888, "grad_norm": 1154133.5, "learning_rate": 0.0005959164292497626, "loss": 0.2115, "step": 4340 }, { "epoch": 13.92, "grad_norm": 991618.625, "learning_rate": 0.0005947293447293447, "loss": 0.2101, "step": 4350 }, { "epoch": 13.952, "grad_norm": 974332.625, "learning_rate": 0.0005935422602089269, "loss": 0.2084, "step": 4360 }, { "epoch": 13.984, "grad_norm": 1015135.1875, "learning_rate": 0.000592355175688509, "loss": 0.2487, "step": 4370 }, { "epoch": 14.0, "eval_accuracy": 0.7645, "eval_loss": 0.5582425594329834, "eval_runtime": 61.8491, "eval_samples_per_second": 64.674, "eval_steps_per_second": 2.021, "step": 4375 }, { "epoch": 14.016, "grad_norm": null, "learning_rate": 0.0005911680911680912, 
"loss": 0.2202, "step": 4380 }, { "epoch": 14.048, "grad_norm": 725532.25, "learning_rate": 0.0005899810066476732, "loss": 0.1936, "step": 4390 }, { "epoch": 14.08, "grad_norm": null, "learning_rate": 0.0005887939221272554, "loss": 0.2276, "step": 4400 }, { "epoch": 14.112, "grad_norm": null, "learning_rate": 0.0005876068376068377, "loss": 0.215, "step": 4410 }, { "epoch": 14.144, "grad_norm": 687965.625, "learning_rate": 0.0005864197530864199, "loss": 0.2159, "step": 4420 }, { "epoch": 14.176, "grad_norm": 1061211.25, "learning_rate": 0.0005852326685660019, "loss": 0.193, "step": 4430 }, { "epoch": 14.208, "grad_norm": 872460.75, "learning_rate": 0.0005840455840455841, "loss": 0.2167, "step": 4440 }, { "epoch": 14.24, "grad_norm": 1020573.6875, "learning_rate": 0.0005828584995251662, "loss": 0.1868, "step": 4450 }, { "epoch": 14.272, "grad_norm": null, "learning_rate": 0.0005816714150047484, "loss": 0.1987, "step": 4460 }, { "epoch": 14.304, "grad_norm": 1052701.25, "learning_rate": 0.0005804843304843305, "loss": 0.2031, "step": 4470 }, { "epoch": 14.336, "grad_norm": null, "learning_rate": 0.0005792972459639126, "loss": 0.2252, "step": 4480 }, { "epoch": 14.368, "grad_norm": 679741.25, "learning_rate": 0.0005781101614434948, "loss": 0.1823, "step": 4490 }, { "epoch": 14.4, "grad_norm": 1170840.375, "learning_rate": 0.0005769230769230769, "loss": 0.2216, "step": 4500 }, { "epoch": 14.432, "grad_norm": null, "learning_rate": 0.000575735992402659, "loss": 0.235, "step": 4510 }, { "epoch": 14.464, "grad_norm": null, "learning_rate": 0.0005745489078822412, "loss": 0.1994, "step": 4520 }, { "epoch": 14.496, "grad_norm": 1133208.125, "learning_rate": 0.0005733618233618234, "loss": 0.2242, "step": 4530 }, { "epoch": 14.528, "grad_norm": 1266796.75, "learning_rate": 0.0005721747388414054, "loss": 0.2125, "step": 4540 }, { "epoch": 14.56, "grad_norm": 1040882.375, "learning_rate": 0.0005709876543209876, "loss": 0.1994, "step": 4550 }, { "epoch": 
14.592, "grad_norm": 896784.9375, "learning_rate": 0.0005698005698005699, "loss": 0.2121, "step": 4560 }, { "epoch": 14.624, "grad_norm": null, "learning_rate": 0.000568613485280152, "loss": 0.2284, "step": 4570 }, { "epoch": 14.656, "grad_norm": 1164985.0, "learning_rate": 0.0005674264007597341, "loss": 0.2249, "step": 4580 }, { "epoch": 14.688, "grad_norm": null, "learning_rate": 0.0005662393162393163, "loss": 0.2214, "step": 4590 }, { "epoch": 14.72, "grad_norm": 939342.125, "learning_rate": 0.0005650522317188984, "loss": 0.2015, "step": 4600 }, { "epoch": 14.752, "grad_norm": 636754.0625, "learning_rate": 0.0005638651471984806, "loss": 0.2103, "step": 4610 }, { "epoch": 14.784, "grad_norm": 1226811.0, "learning_rate": 0.0005626780626780627, "loss": 0.2122, "step": 4620 }, { "epoch": 14.816, "grad_norm": 1043980.875, "learning_rate": 0.0005614909781576448, "loss": 0.1971, "step": 4630 }, { "epoch": 14.848, "grad_norm": 1471149.375, "learning_rate": 0.000560303893637227, "loss": 0.2176, "step": 4640 }, { "epoch": 14.88, "grad_norm": 935474.1875, "learning_rate": 0.0005591168091168092, "loss": 0.2057, "step": 4650 }, { "epoch": 14.912, "grad_norm": 938039.5625, "learning_rate": 0.0005579297245963912, "loss": 0.2064, "step": 4660 }, { "epoch": 14.943999999999999, "grad_norm": null, "learning_rate": 0.0005567426400759734, "loss": 0.2427, "step": 4670 }, { "epoch": 14.975999999999999, "grad_norm": 1067872.875, "learning_rate": 0.0005555555555555556, "loss": 0.2025, "step": 4680 }, { "epoch": 14.9984, "eval_accuracy": 0.77775, "eval_loss": 0.5413510203361511, "eval_runtime": 58.0126, "eval_samples_per_second": 68.951, "eval_steps_per_second": 2.155, "step": 4687 }, { "epoch": 15.008, "grad_norm": 1081480.5, "learning_rate": 0.0005543684710351377, "loss": 0.2015, "step": 4690 }, { "epoch": 15.04, "grad_norm": 1213699.125, "learning_rate": 0.0005531813865147198, "loss": 0.2256, "step": 4700 }, { "epoch": 15.072, "grad_norm": 1344064.75, "learning_rate": 
0.0005519943019943021, "loss": 0.2214, "step": 4710 }, { "epoch": 15.104, "grad_norm": 849799.5, "learning_rate": 0.0005508072174738842, "loss": 0.2023, "step": 4720 }, { "epoch": 15.136, "grad_norm": 982086.5, "learning_rate": 0.0005496201329534663, "loss": 0.1956, "step": 4730 }, { "epoch": 15.168, "grad_norm": 744843.3125, "learning_rate": 0.0005484330484330485, "loss": 0.203, "step": 4740 }, { "epoch": 15.2, "grad_norm": null, "learning_rate": 0.0005472459639126306, "loss": 0.1863, "step": 4750 }, { "epoch": 15.232, "grad_norm": 1000041.625, "learning_rate": 0.0005460588793922128, "loss": 0.2262, "step": 4760 }, { "epoch": 15.264, "grad_norm": null, "learning_rate": 0.0005448717948717948, "loss": 0.1778, "step": 4770 }, { "epoch": 15.296, "grad_norm": 761818.25, "learning_rate": 0.000543684710351377, "loss": 0.2134, "step": 4780 }, { "epoch": 15.328, "grad_norm": null, "learning_rate": 0.0005424976258309592, "loss": 0.2246, "step": 4790 }, { "epoch": 15.36, "grad_norm": 1044292.875, "learning_rate": 0.0005413105413105413, "loss": 0.2246, "step": 4800 }, { "epoch": 15.392, "grad_norm": 740541.6875, "learning_rate": 0.0005401234567901234, "loss": 0.1943, "step": 4810 }, { "epoch": 15.424, "grad_norm": 980434.4375, "learning_rate": 0.0005389363722697056, "loss": 0.1904, "step": 4820 }, { "epoch": 15.456, "grad_norm": 1246758.625, "learning_rate": 0.0005377492877492877, "loss": 0.2293, "step": 4830 }, { "epoch": 15.488, "grad_norm": 1481804.875, "learning_rate": 0.0005365622032288699, "loss": 0.2251, "step": 4840 }, { "epoch": 15.52, "grad_norm": 956031.5, "learning_rate": 0.000535375118708452, "loss": 0.2239, "step": 4850 }, { "epoch": 15.552, "grad_norm": 1267528.25, "learning_rate": 0.0005341880341880342, "loss": 0.2263, "step": 4860 }, { "epoch": 15.584, "grad_norm": null, "learning_rate": 0.0005330009496676164, "loss": 0.2041, "step": 4870 }, { "epoch": 15.616, "grad_norm": 925829.875, "learning_rate": 0.0005318138651471986, "loss": 0.1838, 
"step": 4880 }, { "epoch": 15.648, "grad_norm": null, "learning_rate": 0.0005306267806267806, "loss": 0.2163, "step": 4890 }, { "epoch": 15.68, "grad_norm": 1228792.375, "learning_rate": 0.0005294396961063628, "loss": 0.2032, "step": 4900 }, { "epoch": 15.712, "grad_norm": 838715.125, "learning_rate": 0.000528252611585945, "loss": 0.1991, "step": 4910 }, { "epoch": 15.744, "grad_norm": 964459.0, "learning_rate": 0.0005270655270655271, "loss": 0.2263, "step": 4920 }, { "epoch": 15.776, "grad_norm": 798003.25, "learning_rate": 0.0005258784425451092, "loss": 0.199, "step": 4930 }, { "epoch": 15.808, "grad_norm": null, "learning_rate": 0.0005246913580246914, "loss": 0.2218, "step": 4940 }, { "epoch": 15.84, "grad_norm": 1483930.875, "learning_rate": 0.0005235042735042735, "loss": 0.2356, "step": 4950 }, { "epoch": 15.872, "grad_norm": null, "learning_rate": 0.0005223171889838556, "loss": 0.2247, "step": 4960 }, { "epoch": 15.904, "grad_norm": null, "learning_rate": 0.0005211301044634378, "loss": 0.1986, "step": 4970 }, { "epoch": 15.936, "grad_norm": 1194618.5, "learning_rate": 0.0005199430199430199, "loss": 0.2393, "step": 4980 }, { "epoch": 15.968, "grad_norm": 837843.75, "learning_rate": 0.0005187559354226021, "loss": 0.2107, "step": 4990 }, { "epoch": 16.0, "grad_norm": 912574.75, "learning_rate": 0.0005175688509021842, "loss": 0.2207, "step": 5000 }, { "epoch": 16.0, "eval_accuracy": 0.7685, "eval_loss": 0.5375534892082214, "eval_runtime": 58.3981, "eval_samples_per_second": 68.495, "eval_steps_per_second": 2.14, "step": 5000 }, { "epoch": 16.032, "grad_norm": null, "learning_rate": 0.0005163817663817663, "loss": 0.2148, "step": 5010 }, { "epoch": 16.064, "grad_norm": null, "learning_rate": 0.0005151946818613486, "loss": 0.2201, "step": 5020 }, { "epoch": 16.096, "grad_norm": 924105.5, "learning_rate": 0.0005140075973409308, "loss": 0.2183, "step": 5030 }, { "epoch": 16.128, "grad_norm": 1160855.125, "learning_rate": 0.0005128205128205128, 
"loss": 0.2145, "step": 5040 }, { "epoch": 16.16, "grad_norm": 1011611.5, "learning_rate": 0.000511633428300095, "loss": 0.2183, "step": 5050 }, { "epoch": 16.192, "grad_norm": 1249247.375, "learning_rate": 0.0005104463437796772, "loss": 0.2077, "step": 5060 }, { "epoch": 16.224, "grad_norm": null, "learning_rate": 0.0005092592592592593, "loss": 0.204, "step": 5070 }, { "epoch": 16.256, "grad_norm": 1121213.75, "learning_rate": 0.0005080721747388414, "loss": 0.2116, "step": 5080 }, { "epoch": 16.288, "grad_norm": 938717.25, "learning_rate": 0.0005068850902184235, "loss": 0.2033, "step": 5090 }, { "epoch": 16.32, "grad_norm": null, "learning_rate": 0.0005056980056980057, "loss": 0.2219, "step": 5100 }, { "epoch": 16.352, "grad_norm": 952882.625, "learning_rate": 0.0005045109211775879, "loss": 0.226, "step": 5110 }, { "epoch": 16.384, "grad_norm": 1363143.5, "learning_rate": 0.0005033238366571699, "loss": 0.2174, "step": 5120 }, { "epoch": 16.416, "grad_norm": 917260.875, "learning_rate": 0.0005021367521367521, "loss": 0.1846, "step": 5130 }, { "epoch": 16.448, "grad_norm": null, "learning_rate": 0.0005009496676163343, "loss": 0.1981, "step": 5140 }, { "epoch": 16.48, "grad_norm": 1652643.0, "learning_rate": 0.0004997625830959164, "loss": 0.2138, "step": 5150 }, { "epoch": 16.512, "grad_norm": 942965.1875, "learning_rate": 0.0004985754985754986, "loss": 0.207, "step": 5160 }, { "epoch": 16.544, "grad_norm": 1058725.375, "learning_rate": 0.0004973884140550808, "loss": 0.1956, "step": 5170 }, { "epoch": 16.576, "grad_norm": 1011255.875, "learning_rate": 0.000496201329534663, "loss": 0.2182, "step": 5180 }, { "epoch": 16.608, "grad_norm": 949710.75, "learning_rate": 0.000495014245014245, "loss": 0.205, "step": 5190 }, { "epoch": 16.64, "grad_norm": 939276.4375, "learning_rate": 0.0004938271604938272, "loss": 0.2186, "step": 5200 }, { "epoch": 16.672, "grad_norm": 1041804.3125, "learning_rate": 0.0004926400759734093, "loss": 0.2115, "step": 5210 }, { "epoch": 
16.704, "grad_norm": 976855.5625, "learning_rate": 0.0004914529914529914, "loss": 0.2076, "step": 5220 }, { "epoch": 16.736, "grad_norm": 881195.3125, "learning_rate": 0.0004902659069325736, "loss": 0.2018, "step": 5230 }, { "epoch": 16.768, "grad_norm": 1045999.0, "learning_rate": 0.0004890788224121557, "loss": 0.2095, "step": 5240 }, { "epoch": 16.8, "grad_norm": 1055538.875, "learning_rate": 0.0004878917378917379, "loss": 0.2254, "step": 5250 }, { "epoch": 16.832, "grad_norm": 1142565.625, "learning_rate": 0.00048670465337132, "loss": 0.2343, "step": 5260 }, { "epoch": 16.864, "grad_norm": 1099992.25, "learning_rate": 0.0004855175688509022, "loss": 0.219, "step": 5270 }, { "epoch": 16.896, "grad_norm": null, "learning_rate": 0.00048433048433048435, "loss": 0.2458, "step": 5280 }, { "epoch": 16.928, "grad_norm": 946468.9375, "learning_rate": 0.0004831433998100665, "loss": 0.1974, "step": 5290 }, { "epoch": 16.96, "grad_norm": 1041877.25, "learning_rate": 0.00048195631528964863, "loss": 0.217, "step": 5300 }, { "epoch": 16.992, "grad_norm": 1171594.625, "learning_rate": 0.0004807692307692308, "loss": 0.2012, "step": 5310 }, { "epoch": 16.9984, "eval_accuracy": 0.77025, "eval_loss": 0.548855721950531, "eval_runtime": 58.086, "eval_samples_per_second": 68.863, "eval_steps_per_second": 2.152, "step": 5312 }, { "epoch": 17.024, "grad_norm": 980497.25, "learning_rate": 0.0004795821462488129, "loss": 0.2157, "step": 5320 }, { "epoch": 17.056, "grad_norm": 748758.875, "learning_rate": 0.0004783950617283951, "loss": 0.2023, "step": 5330 }, { "epoch": 17.088, "grad_norm": 1243409.375, "learning_rate": 0.0004772079772079772, "loss": 0.2254, "step": 5340 }, { "epoch": 17.12, "grad_norm": 738619.375, "learning_rate": 0.00047602089268755936, "loss": 0.1961, "step": 5350 }, { "epoch": 17.152, "grad_norm": 1088919.75, "learning_rate": 0.0004748338081671415, "loss": 0.2067, "step": 5360 }, { "epoch": 17.184, "grad_norm": 966854.0, "learning_rate": 0.0004736467236467237, 
"loss": 0.2142, "step": 5370 }, { "epoch": 17.216, "grad_norm": 713046.25, "learning_rate": 0.0004724596391263058, "loss": 0.2114, "step": 5380 }, { "epoch": 17.248, "grad_norm": 962303.5625, "learning_rate": 0.00047127255460588797, "loss": 0.2236, "step": 5390 }, { "epoch": 17.28, "grad_norm": 1023706.8125, "learning_rate": 0.0004700854700854701, "loss": 0.1875, "step": 5400 }, { "epoch": 17.312, "grad_norm": 950098.0625, "learning_rate": 0.00046889838556505225, "loss": 0.197, "step": 5410 }, { "epoch": 17.344, "grad_norm": 904872.25, "learning_rate": 0.00046771130104463437, "loss": 0.2195, "step": 5420 }, { "epoch": 17.376, "grad_norm": null, "learning_rate": 0.0004665242165242166, "loss": 0.1911, "step": 5430 }, { "epoch": 17.408, "grad_norm": null, "learning_rate": 0.0004653371320037987, "loss": 0.2201, "step": 5440 }, { "epoch": 17.44, "grad_norm": 922537.875, "learning_rate": 0.0004641500474833808, "loss": 0.2058, "step": 5450 }, { "epoch": 17.472, "grad_norm": 1170017.125, "learning_rate": 0.000462962962962963, "loss": 0.2242, "step": 5460 }, { "epoch": 17.504, "grad_norm": 967685.5, "learning_rate": 0.0004617758784425451, "loss": 0.2135, "step": 5470 }, { "epoch": 17.536, "grad_norm": 866546.0, "learning_rate": 0.00046058879392212726, "loss": 0.206, "step": 5480 }, { "epoch": 17.568, "grad_norm": 1577800.875, "learning_rate": 0.0004594017094017094, "loss": 0.2175, "step": 5490 }, { "epoch": 17.6, "grad_norm": null, "learning_rate": 0.00045821462488129154, "loss": 0.2106, "step": 5500 }, { "epoch": 17.632, "grad_norm": 1285103.625, "learning_rate": 0.0004570275403608737, "loss": 0.229, "step": 5510 }, { "epoch": 17.664, "grad_norm": null, "learning_rate": 0.0004558404558404559, "loss": 0.203, "step": 5520 }, { "epoch": 17.696, "grad_norm": null, "learning_rate": 0.000454653371320038, "loss": 0.2219, "step": 5530 }, { "epoch": 17.728, "grad_norm": 1200600.875, "learning_rate": 0.00045346628679962016, "loss": 0.2182, "step": 5540 }, { "epoch": 
17.76, "grad_norm": 1468528.75, "learning_rate": 0.00045227920227920227, "loss": 0.1966, "step": 5550 }, { "epoch": 17.792, "grad_norm": 955159.25, "learning_rate": 0.00045109211775878444, "loss": 0.241, "step": 5560 }, { "epoch": 17.824, "grad_norm": 996693.75, "learning_rate": 0.00044990503323836655, "loss": 0.1966, "step": 5570 }, { "epoch": 17.856, "grad_norm": 1332255.875, "learning_rate": 0.0004487179487179487, "loss": 0.2169, "step": 5580 }, { "epoch": 17.888, "grad_norm": 1062334.375, "learning_rate": 0.0004475308641975309, "loss": 0.2109, "step": 5590 }, { "epoch": 17.92, "grad_norm": Infinity, "learning_rate": 0.00044634377967711305, "loss": 0.2107, "step": 5600 }, { "epoch": 17.951999999999998, "grad_norm": 916163.4375, "learning_rate": 0.00044515669515669517, "loss": 0.2343, "step": 5610 }, { "epoch": 17.984, "grad_norm": 1330298.25, "learning_rate": 0.00044396961063627733, "loss": 0.2198, "step": 5620 }, { "epoch": 18.0, "eval_accuracy": 0.77525, "eval_loss": 0.5560150146484375, "eval_runtime": 58.4205, "eval_samples_per_second": 68.469, "eval_steps_per_second": 2.14, "step": 5625 }, { "epoch": 18.016, "grad_norm": 1105357.875, "learning_rate": 0.00044278252611585945, "loss": 0.1996, "step": 5630 }, { "epoch": 18.048, "grad_norm": 1458348.125, "learning_rate": 0.0004415954415954416, "loss": 0.2199, "step": 5640 }, { "epoch": 18.08, "grad_norm": 970556.25, "learning_rate": 0.0004404083570750237, "loss": 0.2191, "step": 5650 }, { "epoch": 18.112, "grad_norm": Infinity, "learning_rate": 0.00043922127255460595, "loss": 0.2307, "step": 5660 }, { "epoch": 18.144, "grad_norm": 976057.1875, "learning_rate": 0.00043803418803418806, "loss": 0.1966, "step": 5670 }, { "epoch": 18.176, "grad_norm": 833928.25, "learning_rate": 0.00043684710351377023, "loss": 0.2268, "step": 5680 }, { "epoch": 18.208, "grad_norm": 931736.9375, "learning_rate": 0.00043566001899335234, "loss": 0.2261, "step": 5690 }, { "epoch": 18.24, "grad_norm": 1063323.25, "learning_rate": 
0.00043447293447293445, "loss": 0.1793, "step": 5700 }, { "epoch": 18.272, "grad_norm": Infinity, "learning_rate": 0.0004332858499525166, "loss": 0.2064, "step": 5710 }, { "epoch": 18.304, "grad_norm": Infinity, "learning_rate": 0.00043209876543209873, "loss": 0.2174, "step": 5720 }, { "epoch": 18.336, "grad_norm": 745713.25, "learning_rate": 0.0004309116809116809, "loss": 0.1981, "step": 5730 }, { "epoch": 18.368, "grad_norm": 960917.375, "learning_rate": 0.00042972459639126307, "loss": 0.2114, "step": 5740 }, { "epoch": 18.4, "grad_norm": Infinity, "learning_rate": 0.00042853751187084524, "loss": 0.2171, "step": 5750 }, { "epoch": 18.432, "grad_norm": 825171.125, "learning_rate": 0.00042735042735042735, "loss": 0.1824, "step": 5760 }, { "epoch": 18.464, "grad_norm": 769558.375, "learning_rate": 0.0004261633428300095, "loss": 0.2074, "step": 5770 }, { "epoch": 18.496, "grad_norm": 772380.4375, "learning_rate": 0.00042497625830959163, "loss": 0.2226, "step": 5780 }, { "epoch": 18.528, "grad_norm": 1635508.0, "learning_rate": 0.0004237891737891738, "loss": 0.2241, "step": 5790 }, { "epoch": 18.56, "grad_norm": 977012.4375, "learning_rate": 0.0004226020892687559, "loss": 0.1885, "step": 5800 }, { "epoch": 18.592, "grad_norm": 1104309.375, "learning_rate": 0.0004214150047483381, "loss": 0.2285, "step": 5810 }, { "epoch": 18.624, "grad_norm": 1027584.875, "learning_rate": 0.00042022792022792025, "loss": 0.2192, "step": 5820 }, { "epoch": 18.656, "grad_norm": 1208884.875, "learning_rate": 0.0004190408357075024, "loss": 0.2229, "step": 5830 }, { "epoch": 18.688, "grad_norm": 1337715.5, "learning_rate": 0.0004178537511870845, "loss": 0.2235, "step": 5840 }, { "epoch": 18.72, "grad_norm": 1082770.625, "learning_rate": 0.0004166666666666667, "loss": 0.1985, "step": 5850 }, { "epoch": 18.752, "grad_norm": 1084099.75, "learning_rate": 0.0004154795821462488, "loss": 0.2001, "step": 5860 }, { "epoch": 18.784, "grad_norm": 970450.75, "learning_rate": 0.000414292497625831, 
"loss": 0.2216, "step": 5870 }, { "epoch": 18.816, "grad_norm": 811773.875, "learning_rate": 0.0004131054131054131, "loss": 0.2318, "step": 5880 }, { "epoch": 18.848, "grad_norm": 839901.625, "learning_rate": 0.0004119183285849953, "loss": 0.2338, "step": 5890 }, { "epoch": 18.88, "grad_norm": 1070109.25, "learning_rate": 0.0004107312440645774, "loss": 0.2, "step": 5900 }, { "epoch": 18.912, "grad_norm": 795279.5, "learning_rate": 0.0004095441595441596, "loss": 0.1919, "step": 5910 }, { "epoch": 18.944, "grad_norm": 1289021.25, "learning_rate": 0.0004083570750237417, "loss": 0.1973, "step": 5920 }, { "epoch": 18.976, "grad_norm": 1276458.875, "learning_rate": 0.00040716999050332387, "loss": 0.2171, "step": 5930 }, { "epoch": 18.9984, "eval_accuracy": 0.7725, "eval_loss": 0.557004988193512, "eval_runtime": 57.9489, "eval_samples_per_second": 69.026, "eval_steps_per_second": 2.157, "step": 5937 }, { "epoch": 19.008, "grad_norm": Infinity, "learning_rate": 0.000405982905982906, "loss": 0.2159, "step": 5940 }, { "epoch": 19.04, "grad_norm": 974318.875, "learning_rate": 0.0004047958214624881, "loss": 0.2077, "step": 5950 }, { "epoch": 19.072, "grad_norm": 1039002.875, "learning_rate": 0.00040360873694207026, "loss": 0.2189, "step": 5960 }, { "epoch": 19.104, "grad_norm": 852144.6875, "learning_rate": 0.00040242165242165243, "loss": 0.1993, "step": 5970 }, { "epoch": 19.136, "grad_norm": 1005178.375, "learning_rate": 0.0004012345679012346, "loss": 0.2116, "step": 5980 }, { "epoch": 19.168, "grad_norm": 1007026.75, "learning_rate": 0.0004000474833808167, "loss": 0.1993, "step": 5990 }, { "epoch": 19.2, "grad_norm": 1615725.875, "learning_rate": 0.0003988603988603989, "loss": 0.2174, "step": 6000 }, { "epoch": 19.232, "grad_norm": Infinity, "learning_rate": 0.000397673314339981, "loss": 0.2001, "step": 6010 }, { "epoch": 19.264, "grad_norm": Infinity, "learning_rate": 0.00039648622981956316, "loss": 0.1909, "step": 6020 }, { "epoch": 19.296, "grad_norm": Infinity, 
"learning_rate": 0.00039529914529914527, "loss": 0.1939, "step": 6030 }, { "epoch": 19.328, "grad_norm": Infinity, "learning_rate": 0.00039411206077872744, "loss": 0.216, "step": 6040 }, { "epoch": 19.36, "grad_norm": NaN, "learning_rate": 0.0003929249762583096, "loss": 0.2347, "step": 6050 }, { "epoch": 19.392, "grad_norm": Infinity, "learning_rate": 0.0003917378917378918, "loss": 0.2205, "step": 6060 }, { "epoch": 19.424, "grad_norm": Infinity, "learning_rate": 0.0003905508072174739, "loss": 0.2024, "step": 6070 }, { "epoch": 19.456, "grad_norm": NaN, "learning_rate": 0.00038936372269705605, "loss": 0.2393, "step": 6080 }, { "epoch": 19.488, "grad_norm": Infinity, "learning_rate": 0.00038817663817663817, "loss": 0.2286, "step": 6090 }, { "epoch": 19.52, "grad_norm": Infinity, "learning_rate": 0.00038698955365622033, "loss": 0.223, "step": 6100 }, { "epoch": 19.552, "grad_norm": Infinity, "learning_rate": 0.00038580246913580245, "loss": 0.2015, "step": 6110 }, { "epoch": 19.584, "grad_norm": Infinity, "learning_rate": 0.00038461538461538467, "loss": 0.2163, "step": 6120 }, { "epoch": 19.616, "grad_norm": 1314457.375, "learning_rate": 0.0003834283000949668, "loss": 0.2002, "step": 6130 }, { "epoch": 19.648, "grad_norm": Infinity, "learning_rate": 0.00038224121557454895, "loss": 0.2215, "step": 6140 }, { "epoch": 19.68, "grad_norm": NaN, "learning_rate": 0.00038105413105413106, "loss": 0.2117, "step": 6150 }, { "epoch": 19.712, "grad_norm": NaN, "learning_rate": 0.00037986704653371323, "loss": 0.2196, "step": 6160 }, { "epoch": 19.744, "grad_norm": Infinity, "learning_rate": 0.00037867996201329534, "loss": 0.2303, "step": 6170 }, { "epoch": 19.776, "grad_norm": Infinity, "learning_rate": 0.00037749287749287746, "loss": 0.2374, "step": 6180 }, { "epoch": 19.808, "grad_norm": NaN, "learning_rate": 0.0003763057929724596, "loss": 0.2177, "step": 6190 }, { "epoch": 19.84, "grad_norm": NaN, "learning_rate": 0.0003751187084520418, "loss": 0.2005, "step": 6200 }, { "epoch": 
19.872, "grad_norm": Infinity, "learning_rate": 0.00037393162393162396, "loss": 0.2228, "step": 6210 }, { "epoch": 19.904, "grad_norm": Infinity, "learning_rate": 0.00037274453941120607, "loss": 0.201, "step": 6220 }, { "epoch": 19.936, "grad_norm": NaN, "learning_rate": 0.00037155745489078824, "loss": 0.205, "step": 6230 }, { "epoch": 19.968, "grad_norm": 1964826.75, "learning_rate": 0.00037037037037037035, "loss": 0.2213, "step": 6240 }, { "epoch": 20.0, "grad_norm": Infinity, "learning_rate": 0.0003691832858499525, "loss": 0.2116, "step": 6250 }, { "epoch": 20.0, "eval_accuracy": 0.7625, "eval_loss": 0.5622299909591675, "eval_runtime": 58.6144, "eval_samples_per_second": 68.243, "eval_steps_per_second": 2.133, "step": 6250 }, { "epoch": 20.032, "grad_norm": NaN, "learning_rate": 0.00036799620132953463, "loss": 0.2141, "step": 6260 }, { "epoch": 20.064, "grad_norm": Infinity, "learning_rate": 0.00036680911680911685, "loss": 0.2003, "step": 6270 }, { "epoch": 20.096, "grad_norm": Infinity, "learning_rate": 0.00036562203228869897, "loss": 0.2081, "step": 6280 }, { "epoch": 20.128, "grad_norm": 1561355.625, "learning_rate": 0.00036443494776828113, "loss": 0.2078, "step": 6290 }, { "epoch": 20.16, "grad_norm": Infinity, "learning_rate": 0.00036324786324786325, "loss": 0.2105, "step": 6300 }, { "epoch": 20.192, "grad_norm": Infinity, "learning_rate": 0.0003620607787274454, "loss": 0.2395, "step": 6310 }, { "epoch": 20.224, "grad_norm": Infinity, "learning_rate": 0.00036087369420702753, "loss": 0.2105, "step": 6320 }, { "epoch": 20.256, "grad_norm": Infinity, "learning_rate": 0.0003596866096866097, "loss": 0.216, "step": 6330 }, { "epoch": 20.288, "grad_norm": Infinity, "learning_rate": 0.0003584995251661918, "loss": 0.224, "step": 6340 }, { "epoch": 20.32, "grad_norm": Infinity, "learning_rate": 0.00035731244064577403, "loss": 0.2054, "step": 6350 }, { "epoch": 20.352, "grad_norm": 1508851.0, "learning_rate": 0.00035612535612535614, "loss": 0.2151, "step": 6360 }, { 
"epoch": 20.384, "grad_norm": 1946181.375, "learning_rate": 0.0003549382716049383, "loss": 0.2173, "step": 6370 }, { "epoch": 20.416, "grad_norm": Infinity, "learning_rate": 0.0003537511870845204, "loss": 0.2035, "step": 6380 }, { "epoch": 20.448, "grad_norm": Infinity, "learning_rate": 0.0003525641025641026, "loss": 0.2215, "step": 6390 }, { "epoch": 20.48, "grad_norm": Infinity, "learning_rate": 0.0003513770180436847, "loss": 0.205, "step": 6400 }, { "epoch": 20.512, "grad_norm": Infinity, "learning_rate": 0.00035018993352326687, "loss": 0.1974, "step": 6410 }, { "epoch": 20.544, "grad_norm": 1950701.25, "learning_rate": 0.000349002849002849, "loss": 0.2087, "step": 6420 }, { "epoch": 20.576, "grad_norm": Infinity, "learning_rate": 0.00034781576448243115, "loss": 0.2136, "step": 6430 }, { "epoch": 20.608, "grad_norm": NaN, "learning_rate": 0.0003466286799620133, "loss": 0.2051, "step": 6440 }, { "epoch": 20.64, "grad_norm": Infinity, "learning_rate": 0.00034544159544159543, "loss": 0.2181, "step": 6450 }, { "epoch": 20.672, "grad_norm": Infinity, "learning_rate": 0.0003442545109211776, "loss": 0.2367, "step": 6460 }, { "epoch": 20.704, "grad_norm": Infinity, "learning_rate": 0.0003430674264007597, "loss": 0.1962, "step": 6470 }, { "epoch": 20.736, "grad_norm": 1704853.25, "learning_rate": 0.0003418803418803419, "loss": 0.201, "step": 6480 }, { "epoch": 20.768, "grad_norm": Infinity, "learning_rate": 0.000340693257359924, "loss": 0.2186, "step": 6490 }, { "epoch": 20.8, "grad_norm": Infinity, "learning_rate": 0.0003395061728395062, "loss": 0.2131, "step": 6500 }, { "epoch": 20.832, "grad_norm": Infinity, "learning_rate": 0.00033831908831908833, "loss": 0.223, "step": 6510 }, { "epoch": 20.864, "grad_norm": 1689417.75, "learning_rate": 0.0003371320037986705, "loss": 0.2086, "step": 6520 }, { "epoch": 20.896, "grad_norm": Infinity, "learning_rate": 0.0003359449192782526, "loss": 0.2109, "step": 6530 }, { "epoch": 20.928, "grad_norm": Infinity, "learning_rate": 
0.0003347578347578348, "loss": 0.2014, "step": 6540 }, { "epoch": 20.96, "grad_norm": Infinity, "learning_rate": 0.0003335707502374169, "loss": 0.2174, "step": 6550 }, { "epoch": 20.992, "grad_norm": Infinity, "learning_rate": 0.00033238366571699906, "loss": 0.2162, "step": 6560 }, { "epoch": 20.9984, "eval_accuracy": 0.76675, "eval_loss": 0.5587143898010254, "eval_runtime": 59.4419, "eval_samples_per_second": 67.293, "eval_steps_per_second": 2.103, "step": 6562 }, { "epoch": 21.024, "grad_norm": 1291432.5, "learning_rate": 0.00033119658119658117, "loss": 0.2018, "step": 6570 }, { "epoch": 21.056, "grad_norm": NaN, "learning_rate": 0.0003300094966761634, "loss": 0.2017, "step": 6580 }, { "epoch": 21.088, "grad_norm": Infinity, "learning_rate": 0.0003288224121557455, "loss": 0.2061, "step": 6590 }, { "epoch": 21.12, "grad_norm": NaN, "learning_rate": 0.00032763532763532767, "loss": 0.2068, "step": 6600 }, { "epoch": 21.152, "grad_norm": Infinity, "learning_rate": 0.0003264482431149098, "loss": 0.2055, "step": 6610 }, { "epoch": 21.184, "grad_norm": Infinity, "learning_rate": 0.00032526115859449195, "loss": 0.2069, "step": 6620 }, { "epoch": 21.216, "grad_norm": 1809235.625, "learning_rate": 0.00032407407407407406, "loss": 0.1952, "step": 6630 }, { "epoch": 21.248, "grad_norm": NaN, "learning_rate": 0.00032288698955365623, "loss": 0.2042, "step": 6640 }, { "epoch": 21.28, "grad_norm": Infinity, "learning_rate": 0.00032169990503323835, "loss": 0.2071, "step": 6650 }, { "epoch": 21.312, "grad_norm": Infinity, "learning_rate": 0.00032051282051282057, "loss": 0.2131, "step": 6660 }, { "epoch": 21.344, "grad_norm": Infinity, "learning_rate": 0.0003193257359924027, "loss": 0.2297, "step": 6670 }, { "epoch": 21.376, "grad_norm": NaN, "learning_rate": 0.0003181386514719848, "loss": 0.2083, "step": 6680 }, { "epoch": 21.408, "grad_norm": 1783178.125, "learning_rate": 0.00031695156695156696, "loss": 0.1937, "step": 6690 }, { "epoch": 21.44, "grad_norm": NaN, "learning_rate": 
0.0003157644824311491, "loss": 0.2248, "step": 6700 }, { "epoch": 21.472, "grad_norm": Infinity, "learning_rate": 0.00031457739791073124, "loss": 0.2114, "step": 6710 }, { "epoch": 21.504, "grad_norm": Infinity, "learning_rate": 0.00031339031339031335, "loss": 0.2521, "step": 6720 }, { "epoch": 21.536, "grad_norm": NaN, "learning_rate": 0.0003122032288698956, "loss": 0.2058, "step": 6730 }, { "epoch": 21.568, "grad_norm": NaN, "learning_rate": 0.0003110161443494777, "loss": 0.1923, "step": 6740 }, { "epoch": 21.6, "grad_norm": Infinity, "learning_rate": 0.00030982905982905986, "loss": 0.2405, "step": 6750 }, { "epoch": 21.632, "grad_norm": 1427268.125, "learning_rate": 0.00030864197530864197, "loss": 0.187, "step": 6760 }, { "epoch": 21.664, "grad_norm": 1644058.125, "learning_rate": 0.00030745489078822414, "loss": 0.2343, "step": 6770 }, { "epoch": 21.696, "grad_norm": 2057941.25, "learning_rate": 0.00030626780626780625, "loss": 0.2114, "step": 6780 }, { "epoch": 21.728, "grad_norm": Infinity, "learning_rate": 0.0003050807217473884, "loss": 0.2144, "step": 6790 }, { "epoch": 21.76, "grad_norm": Infinity, "learning_rate": 0.00030389363722697053, "loss": 0.2318, "step": 6800 }, { "epoch": 21.792, "grad_norm": Infinity, "learning_rate": 0.00030270655270655275, "loss": 0.2327, "step": 6810 }, { "epoch": 21.824, "grad_norm": Infinity, "learning_rate": 0.00030151946818613486, "loss": 0.2207, "step": 6820 }, { "epoch": 21.856, "grad_norm": Infinity, "learning_rate": 0.00030033238366571703, "loss": 0.2115, "step": 6830 }, { "epoch": 21.888, "grad_norm": Infinity, "learning_rate": 0.00029914529914529915, "loss": 0.1802, "step": 6840 }, { "epoch": 21.92, "grad_norm": Infinity, "learning_rate": 0.0002979582146248813, "loss": 0.211, "step": 6850 }, { "epoch": 21.951999999999998, "grad_norm": Infinity, "learning_rate": 0.0002967711301044634, "loss": 0.2275, "step": 6860 }, { "epoch": 21.984, "grad_norm": Infinity, "learning_rate": 0.0002955840455840456, "loss": 0.224, "step": 
6870 }, { "epoch": 22.0, "eval_accuracy": 0.77125, "eval_loss": 0.545555591583252, "eval_runtime": 58.3787, "eval_samples_per_second": 68.518, "eval_steps_per_second": 2.141, "step": 6875 }, { "epoch": 22.016, "grad_norm": 2120620.25, "learning_rate": 0.0002943969610636277, "loss": 0.2007, "step": 6880 }, { "epoch": 22.048, "grad_norm": Infinity, "learning_rate": 0.00029320987654320993, "loss": 0.2052, "step": 6890 }, { "epoch": 22.08, "grad_norm": NaN, "learning_rate": 0.00029202279202279204, "loss": 0.2109, "step": 6900 }, { "epoch": 22.112, "grad_norm": Infinity, "learning_rate": 0.0002908357075023742, "loss": 0.1884, "step": 6910 }, { "epoch": 22.144, "grad_norm": Infinity, "learning_rate": 0.0002896486229819563, "loss": 0.2099, "step": 6920 }, { "epoch": 22.176, "grad_norm": Infinity, "learning_rate": 0.00028846153846153843, "loss": 0.2147, "step": 6930 }, { "epoch": 22.208, "grad_norm": Infinity, "learning_rate": 0.0002872744539411206, "loss": 0.2116, "step": 6940 }, { "epoch": 22.24, "grad_norm": Infinity, "learning_rate": 0.0002860873694207027, "loss": 0.2044, "step": 6950 }, { "epoch": 22.272, "grad_norm": 1670116.875, "learning_rate": 0.00028490028490028494, "loss": 0.2107, "step": 6960 }, { "epoch": 22.304, "grad_norm": Infinity, "learning_rate": 0.00028371320037986705, "loss": 0.1955, "step": 6970 }, { "epoch": 22.336, "grad_norm": Infinity, "learning_rate": 0.0002825261158594492, "loss": 0.214, "step": 6980 }, { "epoch": 22.368, "grad_norm": Infinity, "learning_rate": 0.00028133903133903133, "loss": 0.2164, "step": 6990 }, { "epoch": 22.4, "grad_norm": Infinity, "learning_rate": 0.0002801519468186135, "loss": 0.2199, "step": 7000 }, { "epoch": 22.432, "grad_norm": Infinity, "learning_rate": 0.0002789648622981956, "loss": 0.2066, "step": 7010 }, { "epoch": 22.464, "grad_norm": 2138083.0, "learning_rate": 0.0002777777777777778, "loss": 0.2208, "step": 7020 }, { "epoch": 22.496, "grad_norm": Infinity, "learning_rate": 0.0002765906932573599, "loss": 
0.2207, "step": 7030 }, { "epoch": 22.528, "grad_norm": Infinity, "learning_rate": 0.0002754036087369421, "loss": 0.2121, "step": 7040 }, { "epoch": 22.56, "grad_norm": Infinity, "learning_rate": 0.0002742165242165242, "loss": 0.2049, "step": 7050 }, { "epoch": 22.592, "grad_norm": Infinity, "learning_rate": 0.0002730294396961064, "loss": 0.2268, "step": 7060 }, { "epoch": 22.624, "grad_norm": Infinity, "learning_rate": 0.0002718423551756885, "loss": 0.2251, "step": 7070 }, { "epoch": 22.656, "grad_norm": 1488781.25, "learning_rate": 0.0002706552706552707, "loss": 0.2041, "step": 7080 }, { "epoch": 22.688, "grad_norm": Infinity, "learning_rate": 0.0002694681861348528, "loss": 0.2164, "step": 7090 }, { "epoch": 22.72, "grad_norm": Infinity, "learning_rate": 0.00026828110161443495, "loss": 0.2033, "step": 7100 }, { "epoch": 22.752, "grad_norm": Infinity, "learning_rate": 0.0002670940170940171, "loss": 0.1891, "step": 7110 }, { "epoch": 22.784, "grad_norm": Infinity, "learning_rate": 0.0002659069325735993, "loss": 0.2017, "step": 7120 }, { "epoch": 22.816, "grad_norm": Infinity, "learning_rate": 0.0002647198480531814, "loss": 0.234, "step": 7130 }, { "epoch": 22.848, "grad_norm": Infinity, "learning_rate": 0.00026353276353276357, "loss": 0.2086, "step": 7140 }, { "epoch": 22.88, "grad_norm": Infinity, "learning_rate": 0.0002623456790123457, "loss": 0.2041, "step": 7150 }, { "epoch": 22.912, "grad_norm": Infinity, "learning_rate": 0.0002611585944919278, "loss": 0.2287, "step": 7160 }, { "epoch": 22.944, "grad_norm": NaN, "learning_rate": 0.00025997150997150996, "loss": 0.2055, "step": 7170 }, { "epoch": 22.976, "grad_norm": Infinity, "learning_rate": 0.0002587844254510921, "loss": 0.212, "step": 7180 }, { "epoch": 22.9984, "eval_accuracy": 0.76525, "eval_loss": 0.5647286772727966, "eval_runtime": 58.2038, "eval_samples_per_second": 68.724, "eval_steps_per_second": 2.148, "step": 7187 }, { "epoch": 23.008, "grad_norm": Infinity, "learning_rate": 0.0002575973409306743, 
"loss": 0.2204, "step": 7190 }, { "epoch": 23.04, "grad_norm": Infinity, "learning_rate": 0.0002564102564102564, "loss": 0.2154, "step": 7200 }, { "epoch": 23.072, "grad_norm": Infinity, "learning_rate": 0.0002552231718898386, "loss": 0.2123, "step": 7210 }, { "epoch": 23.104, "grad_norm": Infinity, "learning_rate": 0.0002540360873694207, "loss": 0.2144, "step": 7220 }, { "epoch": 23.136, "grad_norm": Infinity, "learning_rate": 0.00025284900284900286, "loss": 0.193, "step": 7230 }, { "epoch": 23.168, "grad_norm": 1738463.0, "learning_rate": 0.00025166191832858497, "loss": 0.1986, "step": 7240 }, { "epoch": 23.2, "grad_norm": Infinity, "learning_rate": 0.00025047483380816714, "loss": 0.2222, "step": 7250 }, { "epoch": 23.232, "grad_norm": Infinity, "learning_rate": 0.0002492877492877493, "loss": 0.2246, "step": 7260 }, { "epoch": 23.264, "grad_norm": NaN, "learning_rate": 0.0002481006647673315, "loss": 0.2103, "step": 7270 }, { "epoch": 23.296, "grad_norm": Infinity, "learning_rate": 0.0002469135802469136, "loss": 0.208, "step": 7280 }, { "epoch": 23.328, "grad_norm": Infinity, "learning_rate": 0.0002457264957264957, "loss": 0.2187, "step": 7290 }, { "epoch": 23.36, "grad_norm": Infinity, "learning_rate": 0.00024453941120607787, "loss": 0.211, "step": 7300 }, { "epoch": 23.392, "grad_norm": 1851251.625, "learning_rate": 0.00024335232668566, "loss": 0.1992, "step": 7310 }, { "epoch": 23.424, "grad_norm": Infinity, "learning_rate": 0.00024216524216524217, "loss": 0.1985, "step": 7320 }, { "epoch": 23.456, "grad_norm": Infinity, "learning_rate": 0.00024097815764482431, "loss": 0.2002, "step": 7330 }, { "epoch": 23.488, "grad_norm": Infinity, "learning_rate": 0.00023979107312440645, "loss": 0.2025, "step": 7340 }, { "epoch": 23.52, "grad_norm": Infinity, "learning_rate": 0.0002386039886039886, "loss": 0.1979, "step": 7350 }, { "epoch": 23.552, "grad_norm": Infinity, "learning_rate": 0.00023741690408357076, "loss": 0.1975, "step": 7360 }, { "epoch": 23.584, "grad_norm": 
Infinity, "learning_rate": 0.0002362298195631529, "loss": 0.2347, "step": 7370 }, { "epoch": 23.616, "grad_norm": Infinity, "learning_rate": 0.00023504273504273504, "loss": 0.2143, "step": 7380 }, { "epoch": 23.648, "grad_norm": Infinity, "learning_rate": 0.00023385565052231718, "loss": 0.2299, "step": 7390 }, { "epoch": 23.68, "grad_norm": Infinity, "learning_rate": 0.00023266856600189935, "loss": 0.2, "step": 7400 }, { "epoch": 23.712, "grad_norm": Infinity, "learning_rate": 0.0002314814814814815, "loss": 0.2225, "step": 7410 }, { "epoch": 23.744, "grad_norm": Infinity, "learning_rate": 0.00023029439696106363, "loss": 0.2049, "step": 7420 }, { "epoch": 23.776, "grad_norm": Infinity, "learning_rate": 0.00022910731244064577, "loss": 0.2258, "step": 7430 }, { "epoch": 23.808, "grad_norm": Infinity, "learning_rate": 0.00022792022792022794, "loss": 0.2008, "step": 7440 }, { "epoch": 23.84, "grad_norm": Infinity, "learning_rate": 0.00022673314339981008, "loss": 0.2079, "step": 7450 }, { "epoch": 23.872, "grad_norm": Infinity, "learning_rate": 0.00022554605887939222, "loss": 0.1949, "step": 7460 }, { "epoch": 23.904, "grad_norm": Infinity, "learning_rate": 0.00022435897435897436, "loss": 0.1853, "step": 7470 }, { "epoch": 23.936, "grad_norm": NaN, "learning_rate": 0.00022317188983855653, "loss": 0.2076, "step": 7480 }, { "epoch": 23.968, "grad_norm": Infinity, "learning_rate": 0.00022198480531813867, "loss": 0.2082, "step": 7490 }, { "epoch": 24.0, "grad_norm": Infinity, "learning_rate": 0.0002207977207977208, "loss": 0.2084, "step": 7500 }, { "epoch": 24.0, "eval_accuracy": 0.76725, "eval_loss": 0.5532636046409607, "eval_runtime": 58.6171, "eval_samples_per_second": 68.239, "eval_steps_per_second": 2.132, "step": 7500 }, { "epoch": 24.032, "grad_norm": Infinity, "learning_rate": 0.00021961063627730297, "loss": 0.2117, "step": 7510 }, { "epoch": 24.064, "grad_norm": NaN, "learning_rate": 0.00021842355175688511, "loss": 0.2147, "step": 7520 }, { "epoch": 24.096, 
"grad_norm": Infinity, "learning_rate": 0.00021723646723646723, "loss": 0.2179, "step": 7530 }, { "epoch": 24.128, "grad_norm": Infinity, "learning_rate": 0.00021604938271604937, "loss": 0.1879, "step": 7540 }, { "epoch": 24.16, "grad_norm": Infinity, "learning_rate": 0.00021486229819563153, "loss": 0.1989, "step": 7550 }, { "epoch": 24.192, "grad_norm": Infinity, "learning_rate": 0.00021367521367521368, "loss": 0.2281, "step": 7560 }, { "epoch": 24.224, "grad_norm": Infinity, "learning_rate": 0.00021248812915479582, "loss": 0.2159, "step": 7570 }, { "epoch": 24.256, "grad_norm": Infinity, "learning_rate": 0.00021130104463437796, "loss": 0.2186, "step": 7580 }, { "epoch": 24.288, "grad_norm": Infinity, "learning_rate": 0.00021011396011396012, "loss": 0.2257, "step": 7590 }, { "epoch": 24.32, "grad_norm": Infinity, "learning_rate": 0.00020892687559354226, "loss": 0.2098, "step": 7600 }, { "epoch": 24.352, "grad_norm": 1342396.875, "learning_rate": 0.0002077397910731244, "loss": 0.2101, "step": 7610 }, { "epoch": 24.384, "grad_norm": Infinity, "learning_rate": 0.00020655270655270654, "loss": 0.213, "step": 7620 }, { "epoch": 24.416, "grad_norm": Infinity, "learning_rate": 0.0002053656220322887, "loss": 0.211, "step": 7630 }, { "epoch": 24.448, "grad_norm": NaN, "learning_rate": 0.00020417853751187085, "loss": 0.187, "step": 7640 }, { "epoch": 24.48, "grad_norm": Infinity, "learning_rate": 0.000202991452991453, "loss": 0.2285, "step": 7650 }, { "epoch": 24.512, "grad_norm": 1773714.0, "learning_rate": 0.00020180436847103513, "loss": 0.2177, "step": 7660 }, { "epoch": 24.544, "grad_norm": Infinity, "learning_rate": 0.0002006172839506173, "loss": 0.2064, "step": 7670 }, { "epoch": 24.576, "grad_norm": Infinity, "learning_rate": 0.00019943019943019944, "loss": 0.2344, "step": 7680 }, { "epoch": 24.608, "grad_norm": Infinity, "learning_rate": 0.00019824311490978158, "loss": 0.2185, "step": 7690 }, { "epoch": 24.64, "grad_norm": Infinity, "learning_rate": 
0.00019705603038936372, "loss": 0.2115, "step": 7700 }, { "epoch": 24.672, "grad_norm": Infinity, "learning_rate": 0.0001958689458689459, "loss": 0.2214, "step": 7710 }, { "epoch": 24.704, "grad_norm": Infinity, "learning_rate": 0.00019468186134852803, "loss": 0.2071, "step": 7720 }, { "epoch": 24.736, "grad_norm": Infinity, "learning_rate": 0.00019349477682811017, "loss": 0.2109, "step": 7730 }, { "epoch": 24.768, "grad_norm": Infinity, "learning_rate": 0.00019230769230769233, "loss": 0.195, "step": 7740 }, { "epoch": 24.8, "grad_norm": 1795345.25, "learning_rate": 0.00019112060778727447, "loss": 0.2347, "step": 7750 }, { "epoch": 24.832, "grad_norm": Infinity, "learning_rate": 0.00018993352326685662, "loss": 0.2028, "step": 7760 }, { "epoch": 24.864, "grad_norm": Infinity, "learning_rate": 0.00018874643874643873, "loss": 0.231, "step": 7770 }, { "epoch": 24.896, "grad_norm": Infinity, "learning_rate": 0.0001875593542260209, "loss": 0.2069, "step": 7780 }, { "epoch": 24.928, "grad_norm": Infinity, "learning_rate": 0.00018637226970560304, "loss": 0.1928, "step": 7790 }, { "epoch": 24.96, "grad_norm": 1809381.25, "learning_rate": 0.00018518518518518518, "loss": 0.1927, "step": 7800 }, { "epoch": 24.992, "grad_norm": Infinity, "learning_rate": 0.00018399810066476732, "loss": 0.2226, "step": 7810 }, { "epoch": 24.9984, "eval_accuracy": 0.7705, "eval_loss": 0.5434042811393738, "eval_runtime": 59.0686, "eval_samples_per_second": 67.718, "eval_steps_per_second": 2.116, "step": 7812 }, { "epoch": 25.024, "grad_norm": Infinity, "learning_rate": 0.00018281101614434948, "loss": 0.2133, "step": 7820 }, { "epoch": 25.056, "grad_norm": Infinity, "learning_rate": 0.00018162393162393162, "loss": 0.2168, "step": 7830 }, { "epoch": 25.088, "grad_norm": Infinity, "learning_rate": 0.00018043684710351376, "loss": 0.2239, "step": 7840 }, { "epoch": 25.12, "grad_norm": Infinity, "learning_rate": 0.0001792497625830959, "loss": 0.2393, "step": 7850 }, { "epoch": 25.152, "grad_norm": 
Infinity, "learning_rate": 0.00017806267806267807, "loss": 0.192, "step": 7860 }, { "epoch": 25.184, "grad_norm": Infinity, "learning_rate": 0.0001768755935422602, "loss": 0.2059, "step": 7870 }, { "epoch": 25.216, "grad_norm": Infinity, "learning_rate": 0.00017568850902184235, "loss": 0.2137, "step": 7880 }, { "epoch": 25.248, "grad_norm": Infinity, "learning_rate": 0.0001745014245014245, "loss": 0.2123, "step": 7890 }, { "epoch": 25.28, "grad_norm": Infinity, "learning_rate": 0.00017331433998100666, "loss": 0.2252, "step": 7900 }, { "epoch": 25.312, "grad_norm": 1587581.25, "learning_rate": 0.0001721272554605888, "loss": 0.2132, "step": 7910 }, { "epoch": 25.344, "grad_norm": Infinity, "learning_rate": 0.00017094017094017094, "loss": 0.2104, "step": 7920 }, { "epoch": 25.376, "grad_norm": NaN, "learning_rate": 0.0001697530864197531, "loss": 0.1865, "step": 7930 }, { "epoch": 25.408, "grad_norm": Infinity, "learning_rate": 0.00016856600189933525, "loss": 0.2034, "step": 7940 }, { "epoch": 25.44, "grad_norm": 1961501.125, "learning_rate": 0.0001673789173789174, "loss": 0.2157, "step": 7950 }, { "epoch": 25.472, "grad_norm": Infinity, "learning_rate": 0.00016619183285849953, "loss": 0.2035, "step": 7960 }, { "epoch": 25.504, "grad_norm": 1725835.5, "learning_rate": 0.0001650047483380817, "loss": 0.2063, "step": 7970 }, { "epoch": 25.536, "grad_norm": Infinity, "learning_rate": 0.00016381766381766384, "loss": 0.2128, "step": 7980 }, { "epoch": 25.568, "grad_norm": NaN, "learning_rate": 0.00016263057929724598, "loss": 0.2029, "step": 7990 }, { "epoch": 25.6, "grad_norm": Infinity, "learning_rate": 0.00016144349477682812, "loss": 0.2183, "step": 8000 }, { "epoch": 25.632, "grad_norm": NaN, "learning_rate": 0.00016025641025641028, "loss": 0.2269, "step": 8010 }, { "epoch": 25.664, "grad_norm": Infinity, "learning_rate": 0.0001590693257359924, "loss": 0.2039, "step": 8020 }, { "epoch": 25.696, "grad_norm": NaN, "learning_rate": 0.00015788224121557454, "loss": 0.2344, 
"step": 8030 }, { "epoch": 25.728, "grad_norm": Infinity, "learning_rate": 0.00015669515669515668, "loss": 0.1994, "step": 8040 }, { "epoch": 25.76, "grad_norm": NaN, "learning_rate": 0.00015550807217473884, "loss": 0.2006, "step": 8050 }, { "epoch": 25.792, "grad_norm": NaN, "learning_rate": 0.00015432098765432098, "loss": 0.2082, "step": 8060 }, { "epoch": 25.824, "grad_norm": NaN, "learning_rate": 0.00015313390313390312, "loss": 0.1903, "step": 8070 }, { "epoch": 25.856, "grad_norm": NaN, "learning_rate": 0.00015194681861348526, "loss": 0.2106, "step": 8080 }, { "epoch": 25.888, "grad_norm": NaN, "learning_rate": 0.00015075973409306743, "loss": 0.2059, "step": 8090 }, { "epoch": 25.92, "grad_norm": NaN, "learning_rate": 0.00014957264957264957, "loss": 0.2104, "step": 8100 }, { "epoch": 25.951999999999998, "grad_norm": NaN, "learning_rate": 0.0001483855650522317, "loss": 0.2069, "step": 8110 }, { "epoch": 25.984, "grad_norm": Infinity, "learning_rate": 0.00014719848053181385, "loss": 0.2173, "step": 8120 }, { "epoch": 26.0, "eval_accuracy": 0.7675, "eval_loss": 0.5737802386283875, "eval_runtime": 60.4948, "eval_samples_per_second": 66.121, "eval_steps_per_second": 2.066, "step": 8125 }, { "epoch": 26.016, "grad_norm": NaN, "learning_rate": 0.00014601139601139602, "loss": 0.2187, "step": 8130 }, { "epoch": 26.048, "grad_norm": NaN, "learning_rate": 0.00014482431149097816, "loss": 0.2001, "step": 8140 }, { "epoch": 26.08, "grad_norm": NaN, "learning_rate": 0.0001436372269705603, "loss": 0.2392, "step": 8150 }, { "epoch": 26.112, "grad_norm": NaN, "learning_rate": 0.00014245014245014247, "loss": 0.2125, "step": 8160 }, { "epoch": 26.144, "grad_norm": NaN, "learning_rate": 0.0001412630579297246, "loss": 0.2118, "step": 8170 }, { "epoch": 26.176, "grad_norm": NaN, "learning_rate": 0.00014007597340930675, "loss": 0.2093, "step": 8180 }, { "epoch": 26.208, "grad_norm": NaN, "learning_rate": 0.0001388888888888889, "loss": 0.2059, "step": 8190 }, { "epoch": 26.24, 
"grad_norm": NaN, "learning_rate": 0.00013770180436847106, "loss": 0.2307, "step": 8200 }, { "epoch": 26.272, "grad_norm": NaN, "learning_rate": 0.0001365147198480532, "loss": 0.199, "step": 8210 }, { "epoch": 26.304, "grad_norm": NaN, "learning_rate": 0.00013532763532763534, "loss": 0.2089, "step": 8220 }, { "epoch": 26.336, "grad_norm": NaN, "learning_rate": 0.00013414055080721748, "loss": 0.2205, "step": 8230 }, { "epoch": 26.368, "grad_norm": NaN, "learning_rate": 0.00013295346628679964, "loss": 0.2175, "step": 8240 }, { "epoch": 26.4, "grad_norm": Infinity, "learning_rate": 0.00013176638176638178, "loss": 0.2002, "step": 8250 }, { "epoch": 26.432, "grad_norm": NaN, "learning_rate": 0.0001305792972459639, "loss": 0.2078, "step": 8260 }, { "epoch": 26.464, "grad_norm": Infinity, "learning_rate": 0.00012939221272554604, "loss": 0.2287, "step": 8270 }, { "epoch": 26.496, "grad_norm": NaN, "learning_rate": 0.0001282051282051282, "loss": 0.2089, "step": 8280 }, { "epoch": 26.528, "grad_norm": NaN, "learning_rate": 0.00012701804368471035, "loss": 0.2076, "step": 8290 }, { "epoch": 26.56, "grad_norm": NaN, "learning_rate": 0.00012583095916429249, "loss": 0.2157, "step": 8300 }, { "epoch": 26.592, "grad_norm": NaN, "learning_rate": 0.00012464387464387465, "loss": 0.2178, "step": 8310 }, { "epoch": 26.624, "grad_norm": Infinity, "learning_rate": 0.0001234567901234568, "loss": 0.2168, "step": 8320 }, { "epoch": 26.656, "grad_norm": NaN, "learning_rate": 0.00012226970560303893, "loss": 0.2004, "step": 8330 }, { "epoch": 26.688, "grad_norm": NaN, "learning_rate": 0.00012108262108262109, "loss": 0.2131, "step": 8340 }, { "epoch": 26.72, "grad_norm": NaN, "learning_rate": 0.00011989553656220323, "loss": 0.2059, "step": 8350 }, { "epoch": 26.752, "grad_norm": NaN, "learning_rate": 0.00011870845204178538, "loss": 0.2218, "step": 8360 }, { "epoch": 26.784, "grad_norm": NaN, "learning_rate": 0.00011752136752136752, "loss": 0.1975, "step": 8370 }, { "epoch": 26.816, "grad_norm": 
null, "learning_rate": 0.00011633428300094968, "loss": 0.2113, "step": 8380 }, { "epoch": 26.848, "grad_norm": null, "learning_rate": 0.00011514719848053182, "loss": 0.2346, "step": 8390 }, { "epoch": 26.88, "grad_norm": null, "learning_rate": 0.00011396011396011397, "loss": 0.2067, "step": 8400 }, { "epoch": 26.912, "grad_norm": null, "learning_rate": 0.00011277302943969611, "loss": 0.2158, "step": 8410 }, { "epoch": 26.944, "grad_norm": null, "learning_rate": 0.00011158594491927826, "loss": 0.2046, "step": 8420 }, { "epoch": 26.976, "grad_norm": null, "learning_rate": 0.0001103988603988604, "loss": 0.2216, "step": 8430 }, { "epoch": 26.9984, "eval_accuracy": 0.76725, "eval_loss": 0.5557342171669006, "eval_runtime": 54.1898, "eval_samples_per_second": 73.815, "eval_steps_per_second": 2.307, "step": 8437 }, { "epoch": 27.008, "grad_norm": null, "learning_rate": 0.00010921177587844256, "loss": 0.2184, "step": 8440 }, { "epoch": 27.04, "grad_norm": null, "learning_rate": 0.00010802469135802468, "loss": 0.2286, "step": 8450 }, { "epoch": 27.072, "grad_norm": null, "learning_rate": 0.00010683760683760684, "loss": 0.2076, "step": 8460 }, { "epoch": 27.104, "grad_norm": null, "learning_rate": 0.00010565052231718898, "loss": 0.2199, "step": 8470 }, { "epoch": 27.136, "grad_norm": null, "learning_rate": 0.00010446343779677113, "loss": 0.2072, "step": 8480 }, { "epoch": 27.168, "grad_norm": null, "learning_rate": 0.00010327635327635327, "loss": 0.2274, "step": 8490 }, { "epoch": 27.2, "grad_norm": null, "learning_rate": 0.00010208926875593543, "loss": 0.2152, "step": 8500 }, { "epoch": 27.232, "grad_norm": null, "learning_rate": 0.00010090218423551757, "loss": 0.2265, "step": 8510 }, { "epoch": 27.264, "grad_norm": null, "learning_rate": 9.971509971509972e-05, "loss": 0.2348, "step": 8520 }, { "epoch": 27.296, "grad_norm": null, "learning_rate": 9.852801519468186e-05, "loss": 0.2254, "step": 8530 }, { "epoch": 27.328, "grad_norm": null, "learning_rate": 9.734093067426401e-05, 
"loss": 0.2017, "step": 8540 }, { "epoch": 27.36, "grad_norm": null, "learning_rate": 9.615384615384617e-05, "loss": 0.1982, "step": 8550 }, { "epoch": 27.392, "grad_norm": null, "learning_rate": 9.496676163342831e-05, "loss": 0.2028, "step": 8560 }, { "epoch": 27.424, "grad_norm": null, "learning_rate": 9.377967711301045e-05, "loss": 0.2082, "step": 8570 }, { "epoch": 27.456, "grad_norm": null, "learning_rate": 9.259259259259259e-05, "loss": 0.1861, "step": 8580 }, { "epoch": 27.488, "grad_norm": null, "learning_rate": 9.140550807217474e-05, "loss": 0.2172, "step": 8590 }, { "epoch": 27.52, "grad_norm": null, "learning_rate": 9.021842355175688e-05, "loss": 0.1983, "step": 8600 }, { "epoch": 27.552, "grad_norm": null, "learning_rate": 8.903133903133904e-05, "loss": 0.2247, "step": 8610 }, { "epoch": 27.584, "grad_norm": null, "learning_rate": 8.784425451092118e-05, "loss": 0.2131, "step": 8620 }, { "epoch": 27.616, "grad_norm": null, "learning_rate": 8.665716999050333e-05, "loss": 0.2163, "step": 8630 }, { "epoch": 27.648, "grad_norm": null, "learning_rate": 8.547008547008547e-05, "loss": 0.2069, "step": 8640 }, { "epoch": 27.68, "grad_norm": null, "learning_rate": 8.428300094966762e-05, "loss": 0.1939, "step": 8650 }, { "epoch": 27.712, "grad_norm": null, "learning_rate": 8.309591642924976e-05, "loss": 0.2181, "step": 8660 }, { "epoch": 27.744, "grad_norm": null, "learning_rate": 8.190883190883192e-05, "loss": 0.1991, "step": 8670 }, { "epoch": 27.776, "grad_norm": null, "learning_rate": 8.072174738841406e-05, "loss": 0.2245, "step": 8680 }, { "epoch": 27.808, "grad_norm": null, "learning_rate": 7.95346628679962e-05, "loss": 0.2061, "step": 8690 }, { "epoch": 27.84, "grad_norm": null, "learning_rate": 7.834757834757834e-05, "loss": 0.2171, "step": 8700 }, { "epoch": 27.872, "grad_norm": null, "learning_rate": 7.716049382716049e-05, "loss": 0.2166, "step": 8710 }, { "epoch": 27.904, "grad_norm": null, "learning_rate": 7.597340930674263e-05, "loss": 0.2272, "step": 
8720 }, { "epoch": 27.936, "grad_norm": null, "learning_rate": 7.478632478632479e-05, "loss": 0.1896, "step": 8730 }, { "epoch": 27.968, "grad_norm": null, "learning_rate": 7.359924026590693e-05, "loss": 0.1975, "step": 8740 }, { "epoch": 28.0, "grad_norm": null, "learning_rate": 7.241215574548908e-05, "loss": 0.1918, "step": 8750 }, { "epoch": 28.0, "eval_accuracy": 0.7705, "eval_loss": 0.5501910448074341, "eval_runtime": 51.9391, "eval_samples_per_second": 77.013, "eval_steps_per_second": 2.407, "step": 8750 }, { "epoch": 28.032, "grad_norm": null, "learning_rate": 7.122507122507123e-05, "loss": 0.2219, "step": 8760 }, { "epoch": 28.064, "grad_norm": null, "learning_rate": 7.003798670465337e-05, "loss": 0.2083, "step": 8770 }, { "epoch": 28.096, "grad_norm": null, "learning_rate": 6.885090218423553e-05, "loss": 0.2184, "step": 8780 }, { "epoch": 28.128, "grad_norm": null, "learning_rate": 6.766381766381767e-05, "loss": 0.2101, "step": 8790 }, { "epoch": 28.16, "grad_norm": null, "learning_rate": 6.647673314339982e-05, "loss": 0.2107, "step": 8800 }, { "epoch": 28.192, "grad_norm": null, "learning_rate": 6.528964862298195e-05, "loss": 0.2187, "step": 8810 }, { "epoch": 28.224, "grad_norm": null, "learning_rate": 6.41025641025641e-05, "loss": 0.2333, "step": 8820 }, { "epoch": 28.256, "grad_norm": null, "learning_rate": 6.291547958214624e-05, "loss": 0.1879, "step": 8830 }, { "epoch": 28.288, "grad_norm": null, "learning_rate": 6.17283950617284e-05, "loss": 0.223, "step": 8840 }, { "epoch": 28.32, "grad_norm": null, "learning_rate": 6.0541310541310544e-05, "loss": 0.2429, "step": 8850 }, { "epoch": 28.352, "grad_norm": null, "learning_rate": 5.935422602089269e-05, "loss": 0.2101, "step": 8860 }, { "epoch": 28.384, "grad_norm": null, "learning_rate": 5.816714150047484e-05, "loss": 0.2148, "step": 8870 }, { "epoch": 28.416, "grad_norm": null, "learning_rate": 5.6980056980056985e-05, "loss": 0.2202, "step": 8880 }, { "epoch": 28.448, "grad_norm": null, "learning_rate": 
5.579297245963913e-05, "loss": 0.227, "step": 8890 }, { "epoch": 28.48, "grad_norm": null, "learning_rate": 5.460588793922128e-05, "loss": 0.2122, "step": 8900 }, { "epoch": 28.512, "grad_norm": null, "learning_rate": 5.341880341880342e-05, "loss": 0.2169, "step": 8910 }, { "epoch": 28.544, "grad_norm": null, "learning_rate": 5.2231718898385566e-05, "loss": 0.2016, "step": 8920 }, { "epoch": 28.576, "grad_norm": null, "learning_rate": 5.104463437796771e-05, "loss": 0.2147, "step": 8930 }, { "epoch": 28.608, "grad_norm": null, "learning_rate": 4.985754985754986e-05, "loss": 0.2198, "step": 8940 }, { "epoch": 28.64, "grad_norm": null, "learning_rate": 4.867046533713201e-05, "loss": 0.2045, "step": 8950 }, { "epoch": 28.672, "grad_norm": null, "learning_rate": 4.7483380816714154e-05, "loss": 0.2219, "step": 8960 }, { "epoch": 28.704, "grad_norm": null, "learning_rate": 4.6296296296296294e-05, "loss": 0.2139, "step": 8970 }, { "epoch": 28.736, "grad_norm": null, "learning_rate": 4.510921177587844e-05, "loss": 0.1975, "step": 8980 }, { "epoch": 28.768, "grad_norm": null, "learning_rate": 4.392212725546059e-05, "loss": 0.2197, "step": 8990 }, { "epoch": 28.8, "grad_norm": null, "learning_rate": 4.2735042735042735e-05, "loss": 0.2083, "step": 9000 }, { "epoch": 28.832, "grad_norm": null, "learning_rate": 4.154795821462488e-05, "loss": 0.2399, "step": 9010 }, { "epoch": 28.864, "grad_norm": null, "learning_rate": 4.036087369420703e-05, "loss": 0.2088, "step": 9020 }, { "epoch": 28.896, "grad_norm": null, "learning_rate": 3.917378917378917e-05, "loss": 0.1972, "step": 9030 }, { "epoch": 28.928, "grad_norm": null, "learning_rate": 3.7986704653371316e-05, "loss": 0.205, "step": 9040 }, { "epoch": 28.96, "grad_norm": null, "learning_rate": 3.679962013295346e-05, "loss": 0.1864, "step": 9050 }, { "epoch": 28.992, "grad_norm": null, "learning_rate": 3.561253561253562e-05, "loss": 0.199, "step": 9060 }, { "epoch": 28.9984, "eval_accuracy": 0.7675, "eval_loss": 0.5455857515335083, 
"eval_runtime": 52.0571, "eval_samples_per_second": 76.839, "eval_steps_per_second": 2.401, "step": 9062 }, { "epoch": 29.024, "grad_norm": null, "learning_rate": 3.4425451092117764e-05, "loss": 0.1993, "step": 9070 }, { "epoch": 29.056, "grad_norm": null, "learning_rate": 3.323836657169991e-05, "loss": 0.1937, "step": 9080 }, { "epoch": 29.088, "grad_norm": null, "learning_rate": 3.205128205128205e-05, "loss": 0.2244, "step": 9090 }, { "epoch": 29.12, "grad_norm": null, "learning_rate": 3.08641975308642e-05, "loss": 0.2198, "step": 9100 }, { "epoch": 29.152, "grad_norm": null, "learning_rate": 2.9677113010446345e-05, "loss": 0.1889, "step": 9110 }, { "epoch": 29.184, "grad_norm": null, "learning_rate": 2.8490028490028492e-05, "loss": 0.2253, "step": 9120 }, { "epoch": 29.216, "grad_norm": null, "learning_rate": 2.730294396961064e-05, "loss": 0.2155, "step": 9130 }, { "epoch": 29.248, "grad_norm": null, "learning_rate": 2.6115859449192783e-05, "loss": 0.2084, "step": 9140 }, { "epoch": 29.28, "grad_norm": null, "learning_rate": 2.492877492877493e-05, "loss": 0.1883, "step": 9150 }, { "epoch": 29.312, "grad_norm": null, "learning_rate": 2.3741690408357077e-05, "loss": 0.225, "step": 9160 }, { "epoch": 29.344, "grad_norm": null, "learning_rate": 2.255460588793922e-05, "loss": 0.2065, "step": 9170 }, { "epoch": 29.376, "grad_norm": null, "learning_rate": 2.1367521367521368e-05, "loss": 0.2185, "step": 9180 }, { "epoch": 29.408, "grad_norm": null, "learning_rate": 2.0180436847103515e-05, "loss": 0.2292, "step": 9190 }, { "epoch": 29.44, "grad_norm": null, "learning_rate": 1.8993352326685658e-05, "loss": 0.2212, "step": 9200 }, { "epoch": 29.472, "grad_norm": null, "learning_rate": 1.780626780626781e-05, "loss": 0.2327, "step": 9210 }, { "epoch": 29.504, "grad_norm": null, "learning_rate": 1.6619183285849956e-05, "loss": 0.1956, "step": 9220 }, { "epoch": 29.536, "grad_norm": null, "learning_rate": 1.54320987654321e-05, "loss": 0.2135, "step": 9230 }, { "epoch": 29.568, 
"grad_norm": null, "learning_rate": 1.4245014245014246e-05, "loss": 0.206, "step": 9240 }, { "epoch": 29.6, "grad_norm": null, "learning_rate": 1.3057929724596391e-05, "loss": 0.2007, "step": 9250 }, { "epoch": 29.632, "grad_norm": null, "learning_rate": 1.1870845204178538e-05, "loss": 0.2297, "step": 9260 }, { "epoch": 29.664, "grad_norm": null, "learning_rate": 1.0683760683760684e-05, "loss": 0.2129, "step": 9270 }, { "epoch": 29.696, "grad_norm": null, "learning_rate": 9.496676163342829e-06, "loss": 0.1998, "step": 9280 }, { "epoch": 29.728, "grad_norm": null, "learning_rate": 8.309591642924978e-06, "loss": 0.2104, "step": 9290 }, { "epoch": 29.76, "grad_norm": null, "learning_rate": 7.122507122507123e-06, "loss": 0.2138, "step": 9300 }, { "epoch": 29.792, "grad_norm": null, "learning_rate": 5.935422602089269e-06, "loss": 0.2111, "step": 9310 }, { "epoch": 29.824, "grad_norm": null, "learning_rate": 4.7483380816714145e-06, "loss": 0.2009, "step": 9320 }, { "epoch": 29.856, "grad_norm": null, "learning_rate": 3.5612535612535615e-06, "loss": 0.2021, "step": 9330 }, { "epoch": 29.888, "grad_norm": null, "learning_rate": 2.3741690408357073e-06, "loss": 0.1985, "step": 9340 }, { "epoch": 29.92, "grad_norm": null, "learning_rate": 1.1870845204178536e-06, "loss": 0.2092, "step": 9350 }, { "epoch": 29.951999999999998, "grad_norm": null, "learning_rate": 0.0, "loss": 0.21, "step": 9360 }, { "epoch": 29.951999999999998, "eval_accuracy": 0.7715, "eval_loss": 0.5483142733573914, "eval_runtime": 52.0007, "eval_samples_per_second": 76.922, "eval_steps_per_second": 2.404, "step": 9360 }, { "epoch": 29.951999999999998, "step": 9360, "total_flos": 2.9779463284174356e+19, "train_loss": 0.21163268028161464, "train_runtime": 43715.1441, "train_samples_per_second": 27.45, "train_steps_per_second": 0.214 } ], "logging_steps": 10, "max_steps": 9360, "num_input_tokens_seen": 0, "num_train_epochs": 30, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": 
false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 2.9779463284174356e+19, "train_batch_size": 32, "trial_name": null, "trial_params": null }