{
"best_metric": 0.9886666666666667,
"best_model_checkpoint": "swin-tiny-patch4-window7-224-finetuned-eurosat/checkpoint-1266",
"epoch": 3.0,
"eval_steps": 500,
"global_step": 1266,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.023696682464454975,
"grad_norm": 15.438875198364258,
"learning_rate": 3.937007874015748e-06,
"loss": 9.3952,
"step": 10
},
{
"epoch": 0.04739336492890995,
"grad_norm": 17.365249633789062,
"learning_rate": 7.874015748031496e-06,
"loss": 9.0232,
"step": 20
},
{
"epoch": 0.07109004739336493,
"grad_norm": 23.01938819885254,
"learning_rate": 1.1811023622047245e-05,
"loss": 8.5712,
"step": 30
},
{
"epoch": 0.0947867298578199,
"grad_norm": 51.1461067199707,
"learning_rate": 1.5748031496062993e-05,
"loss": 7.8145,
"step": 40
},
{
"epoch": 0.11848341232227488,
"grad_norm": 48.782527923583984,
"learning_rate": 1.9685039370078743e-05,
"loss": 6.7351,
"step": 50
},
{
"epoch": 0.14218009478672985,
"grad_norm": 86.34725189208984,
"learning_rate": 2.362204724409449e-05,
"loss": 5.2743,
"step": 60
},
{
"epoch": 0.16587677725118483,
"grad_norm": 49.05707550048828,
"learning_rate": 2.755905511811024e-05,
"loss": 4.1303,
"step": 70
},
{
"epoch": 0.1895734597156398,
"grad_norm": 70.76948547363281,
"learning_rate": 3.1496062992125985e-05,
"loss": 3.4811,
"step": 80
},
{
"epoch": 0.2132701421800948,
"grad_norm": 65.15145111083984,
"learning_rate": 3.5433070866141735e-05,
"loss": 3.2691,
"step": 90
},
{
"epoch": 0.23696682464454977,
"grad_norm": 39.73679733276367,
"learning_rate": 3.9370078740157485e-05,
"loss": 2.9577,
"step": 100
},
{
"epoch": 0.26066350710900477,
"grad_norm": 66.20768737792969,
"learning_rate": 4.330708661417323e-05,
"loss": 2.6592,
"step": 110
},
{
"epoch": 0.2843601895734597,
"grad_norm": 50.88495635986328,
"learning_rate": 4.724409448818898e-05,
"loss": 2.4807,
"step": 120
},
{
"epoch": 0.3080568720379147,
"grad_norm": 43.55908203125,
"learning_rate": 4.986830553116769e-05,
"loss": 2.3623,
"step": 130
},
{
"epoch": 0.33175355450236965,
"grad_norm": 30.119890213012695,
"learning_rate": 4.9429323968393335e-05,
"loss": 2.3652,
"step": 140
},
{
"epoch": 0.35545023696682465,
"grad_norm": 28.67392349243164,
"learning_rate": 4.8990342405618965e-05,
"loss": 2.3158,
"step": 150
},
{
"epoch": 0.3791469194312796,
"grad_norm": 31.85961151123047,
"learning_rate": 4.85513608428446e-05,
"loss": 2.3018,
"step": 160
},
{
"epoch": 0.4028436018957346,
"grad_norm": 75.70964813232422,
"learning_rate": 4.8112379280070244e-05,
"loss": 2.1835,
"step": 170
},
{
"epoch": 0.4265402843601896,
"grad_norm": 24.69963836669922,
"learning_rate": 4.7673397717295874e-05,
"loss": 2.0962,
"step": 180
},
{
"epoch": 0.45023696682464454,
"grad_norm": 46.664432525634766,
"learning_rate": 4.723441615452151e-05,
"loss": 2.0229,
"step": 190
},
{
"epoch": 0.47393364928909953,
"grad_norm": 31.81464195251465,
"learning_rate": 4.679543459174715e-05,
"loss": 2.0519,
"step": 200
},
{
"epoch": 0.4976303317535545,
"grad_norm": 25.131099700927734,
"learning_rate": 4.635645302897278e-05,
"loss": 2.1874,
"step": 210
},
{
"epoch": 0.5213270142180095,
"grad_norm": 23.477203369140625,
"learning_rate": 4.591747146619842e-05,
"loss": 2.2174,
"step": 220
},
{
"epoch": 0.5450236966824644,
"grad_norm": 28.71649742126465,
"learning_rate": 4.547848990342406e-05,
"loss": 1.8939,
"step": 230
},
{
"epoch": 0.5687203791469194,
"grad_norm": 25.678308486938477,
"learning_rate": 4.503950834064969e-05,
"loss": 1.9235,
"step": 240
},
{
"epoch": 0.5924170616113744,
"grad_norm": 17.22383689880371,
"learning_rate": 4.460052677787533e-05,
"loss": 1.9575,
"step": 250
},
{
"epoch": 0.6161137440758294,
"grad_norm": 25.669090270996094,
"learning_rate": 4.416154521510097e-05,
"loss": 1.8465,
"step": 260
},
{
"epoch": 0.6398104265402843,
"grad_norm": 27.523319244384766,
"learning_rate": 4.37225636523266e-05,
"loss": 1.9501,
"step": 270
},
{
"epoch": 0.6635071090047393,
"grad_norm": 23.112300872802734,
"learning_rate": 4.328358208955224e-05,
"loss": 1.7979,
"step": 280
},
{
"epoch": 0.6872037914691943,
"grad_norm": 21.1746768951416,
"learning_rate": 4.284460052677788e-05,
"loss": 1.7745,
"step": 290
},
{
"epoch": 0.7109004739336493,
"grad_norm": 39.38931655883789,
"learning_rate": 4.240561896400351e-05,
"loss": 1.8012,
"step": 300
},
{
"epoch": 0.7345971563981043,
"grad_norm": 32.20198440551758,
"learning_rate": 4.196663740122915e-05,
"loss": 2.0618,
"step": 310
},
{
"epoch": 0.7582938388625592,
"grad_norm": 23.157480239868164,
"learning_rate": 4.152765583845479e-05,
"loss": 1.7972,
"step": 320
},
{
"epoch": 0.7819905213270142,
"grad_norm": 24.55837631225586,
"learning_rate": 4.108867427568042e-05,
"loss": 1.8858,
"step": 330
},
{
"epoch": 0.8056872037914692,
"grad_norm": 18.101133346557617,
"learning_rate": 4.064969271290606e-05,
"loss": 1.7343,
"step": 340
},
{
"epoch": 0.8293838862559242,
"grad_norm": 25.414764404296875,
"learning_rate": 4.02107111501317e-05,
"loss": 1.8935,
"step": 350
},
{
"epoch": 0.8530805687203792,
"grad_norm": 15.749005317687988,
"learning_rate": 3.977172958735733e-05,
"loss": 1.5343,
"step": 360
},
{
"epoch": 0.8767772511848341,
"grad_norm": 22.70486831665039,
"learning_rate": 3.933274802458297e-05,
"loss": 1.6898,
"step": 370
},
{
"epoch": 0.9004739336492891,
"grad_norm": 12.152778625488281,
"learning_rate": 3.889376646180861e-05,
"loss": 1.7357,
"step": 380
},
{
"epoch": 0.9241706161137441,
"grad_norm": 14.83531379699707,
"learning_rate": 3.8454784899034244e-05,
"loss": 1.6003,
"step": 390
},
{
"epoch": 0.9478672985781991,
"grad_norm": 21.903425216674805,
"learning_rate": 3.801580333625988e-05,
"loss": 1.6476,
"step": 400
},
{
"epoch": 0.9715639810426541,
"grad_norm": 12.695667266845703,
"learning_rate": 3.757682177348552e-05,
"loss": 1.6488,
"step": 410
},
{
"epoch": 0.995260663507109,
"grad_norm": 18.37169647216797,
"learning_rate": 3.713784021071115e-05,
"loss": 1.4709,
"step": 420
},
{
"epoch": 1.0,
"eval_accuracy": 0.9806666666666667,
"eval_loss": 0.06099523976445198,
"eval_runtime": 35.6417,
"eval_samples_per_second": 168.342,
"eval_steps_per_second": 5.275,
"step": 422
},
{
"epoch": 1.018957345971564,
"grad_norm": 14.619269371032715,
"learning_rate": 3.669885864793679e-05,
"loss": 1.6787,
"step": 430
},
{
"epoch": 1.042654028436019,
"grad_norm": 17.89664077758789,
"learning_rate": 3.6259877085162426e-05,
"loss": 1.667,
"step": 440
},
{
"epoch": 1.066350710900474,
"grad_norm": 17.098857879638672,
"learning_rate": 3.582089552238806e-05,
"loss": 1.5486,
"step": 450
},
{
"epoch": 1.0900473933649288,
"grad_norm": 18.231796264648438,
"learning_rate": 3.53819139596137e-05,
"loss": 1.5404,
"step": 460
},
{
"epoch": 1.113744075829384,
"grad_norm": 18.734926223754883,
"learning_rate": 3.4942932396839335e-05,
"loss": 1.6994,
"step": 470
},
{
"epoch": 1.1374407582938388,
"grad_norm": 15.583982467651367,
"learning_rate": 3.450395083406497e-05,
"loss": 1.5612,
"step": 480
},
{
"epoch": 1.161137440758294,
"grad_norm": 19.229564666748047,
"learning_rate": 3.406496927129061e-05,
"loss": 1.4565,
"step": 490
},
{
"epoch": 1.1848341232227488,
"grad_norm": 23.661117553710938,
"learning_rate": 3.3625987708516244e-05,
"loss": 1.638,
"step": 500
},
{
"epoch": 1.2085308056872037,
"grad_norm": 19.3845272064209,
"learning_rate": 3.318700614574188e-05,
"loss": 1.841,
"step": 510
},
{
"epoch": 1.2322274881516588,
"grad_norm": 22.151485443115234,
"learning_rate": 3.274802458296752e-05,
"loss": 1.8073,
"step": 520
},
{
"epoch": 1.2559241706161137,
"grad_norm": 24.08620262145996,
"learning_rate": 3.230904302019315e-05,
"loss": 1.523,
"step": 530
},
{
"epoch": 1.2796208530805688,
"grad_norm": 25.005783081054688,
"learning_rate": 3.187006145741879e-05,
"loss": 1.5896,
"step": 540
},
{
"epoch": 1.3033175355450237,
"grad_norm": 22.422975540161133,
"learning_rate": 3.1431079894644426e-05,
"loss": 1.599,
"step": 550
},
{
"epoch": 1.3270142180094786,
"grad_norm": 18.511837005615234,
"learning_rate": 3.099209833187006e-05,
"loss": 1.621,
"step": 560
},
{
"epoch": 1.3507109004739337,
"grad_norm": 20.40373420715332,
"learning_rate": 3.0553116769095705e-05,
"loss": 1.573,
"step": 570
},
{
"epoch": 1.3744075829383886,
"grad_norm": 19.02882957458496,
"learning_rate": 3.0114135206321338e-05,
"loss": 1.464,
"step": 580
},
{
"epoch": 1.3981042654028437,
"grad_norm": 18.40727996826172,
"learning_rate": 2.967515364354697e-05,
"loss": 1.5742,
"step": 590
},
{
"epoch": 1.4218009478672986,
"grad_norm": 21.194843292236328,
"learning_rate": 2.923617208077261e-05,
"loss": 1.334,
"step": 600
},
{
"epoch": 1.4454976303317535,
"grad_norm": 18.509262084960938,
"learning_rate": 2.8797190517998247e-05,
"loss": 1.5017,
"step": 610
},
{
"epoch": 1.4691943127962086,
"grad_norm": 20.656261444091797,
"learning_rate": 2.835820895522388e-05,
"loss": 1.6425,
"step": 620
},
{
"epoch": 1.4928909952606635,
"grad_norm": 16.49464988708496,
"learning_rate": 2.791922739244952e-05,
"loss": 1.4641,
"step": 630
},
{
"epoch": 1.5165876777251186,
"grad_norm": 29.223224639892578,
"learning_rate": 2.7480245829675156e-05,
"loss": 1.4755,
"step": 640
},
{
"epoch": 1.5402843601895735,
"grad_norm": 22.57363510131836,
"learning_rate": 2.704126426690079e-05,
"loss": 1.418,
"step": 650
},
{
"epoch": 1.5639810426540284,
"grad_norm": 20.45244026184082,
"learning_rate": 2.660228270412643e-05,
"loss": 1.5061,
"step": 660
},
{
"epoch": 1.5876777251184833,
"grad_norm": 18.2165470123291,
"learning_rate": 2.6163301141352066e-05,
"loss": 1.5323,
"step": 670
},
{
"epoch": 1.6113744075829384,
"grad_norm": 15.586666107177734,
"learning_rate": 2.57243195785777e-05,
"loss": 1.4886,
"step": 680
},
{
"epoch": 1.6350710900473935,
"grad_norm": 21.27082633972168,
"learning_rate": 2.5285338015803338e-05,
"loss": 1.5164,
"step": 690
},
{
"epoch": 1.6587677725118484,
"grad_norm": 18.144695281982422,
"learning_rate": 2.4846356453028975e-05,
"loss": 1.4823,
"step": 700
},
{
"epoch": 1.6824644549763033,
"grad_norm": 18.286558151245117,
"learning_rate": 2.440737489025461e-05,
"loss": 1.5464,
"step": 710
},
{
"epoch": 1.7061611374407581,
"grad_norm": 16.35245704650879,
"learning_rate": 2.3968393327480247e-05,
"loss": 1.4798,
"step": 720
},
{
"epoch": 1.7298578199052133,
"grad_norm": 18.115129470825195,
"learning_rate": 2.3529411764705884e-05,
"loss": 1.4282,
"step": 730
},
{
"epoch": 1.7535545023696684,
"grad_norm": 10.362217903137207,
"learning_rate": 2.309043020193152e-05,
"loss": 1.5111,
"step": 740
},
{
"epoch": 1.7772511848341233,
"grad_norm": 14.301712989807129,
"learning_rate": 2.2651448639157156e-05,
"loss": 1.5408,
"step": 750
},
{
"epoch": 1.8009478672985781,
"grad_norm": 11.159360885620117,
"learning_rate": 2.2212467076382793e-05,
"loss": 1.3557,
"step": 760
},
{
"epoch": 1.824644549763033,
"grad_norm": 22.02513885498047,
"learning_rate": 2.177348551360843e-05,
"loss": 1.4304,
"step": 770
},
{
"epoch": 1.8483412322274881,
"grad_norm": 15.173885345458984,
"learning_rate": 2.1334503950834065e-05,
"loss": 1.5921,
"step": 780
},
{
"epoch": 1.8720379146919433,
"grad_norm": 16.821144104003906,
"learning_rate": 2.0895522388059702e-05,
"loss": 1.4525,
"step": 790
},
{
"epoch": 1.8957345971563981,
"grad_norm": 18.36412811279297,
"learning_rate": 2.0456540825285338e-05,
"loss": 1.3811,
"step": 800
},
{
"epoch": 1.919431279620853,
"grad_norm": 18.73419189453125,
"learning_rate": 2.0017559262510978e-05,
"loss": 1.5829,
"step": 810
},
{
"epoch": 1.943127962085308,
"grad_norm": 15.17447566986084,
"learning_rate": 1.957857769973661e-05,
"loss": 1.317,
"step": 820
},
{
"epoch": 1.966824644549763,
"grad_norm": 18.464017868041992,
"learning_rate": 1.9139596136962247e-05,
"loss": 1.5998,
"step": 830
},
{
"epoch": 1.9905213270142181,
"grad_norm": 16.639902114868164,
"learning_rate": 1.8700614574187887e-05,
"loss": 1.5976,
"step": 840
},
{
"epoch": 2.0,
"eval_accuracy": 0.9868333333333333,
"eval_loss": 0.04062141478061676,
"eval_runtime": 35.7487,
"eval_samples_per_second": 167.838,
"eval_steps_per_second": 5.259,
"step": 844
},
{
"epoch": 2.014218009478673,
"grad_norm": 14.642781257629395,
"learning_rate": 1.826163301141352e-05,
"loss": 1.4676,
"step": 850
},
{
"epoch": 2.037914691943128,
"grad_norm": 18.012798309326172,
"learning_rate": 1.7822651448639156e-05,
"loss": 1.5122,
"step": 860
},
{
"epoch": 2.061611374407583,
"grad_norm": 19.37873077392578,
"learning_rate": 1.7383669885864796e-05,
"loss": 1.3235,
"step": 870
},
{
"epoch": 2.085308056872038,
"grad_norm": 16.801088333129883,
"learning_rate": 1.694468832309043e-05,
"loss": 1.359,
"step": 880
},
{
"epoch": 2.109004739336493,
"grad_norm": 18.08489418029785,
"learning_rate": 1.6505706760316065e-05,
"loss": 1.3441,
"step": 890
},
{
"epoch": 2.132701421800948,
"grad_norm": 14.385065078735352,
"learning_rate": 1.6066725197541705e-05,
"loss": 1.3717,
"step": 900
},
{
"epoch": 2.156398104265403,
"grad_norm": 14.971292495727539,
"learning_rate": 1.562774363476734e-05,
"loss": 1.4493,
"step": 910
},
{
"epoch": 2.1800947867298577,
"grad_norm": 15.784900665283203,
"learning_rate": 1.5188762071992976e-05,
"loss": 1.3515,
"step": 920
},
{
"epoch": 2.2037914691943126,
"grad_norm": 18.48076820373535,
"learning_rate": 1.4749780509218614e-05,
"loss": 1.5577,
"step": 930
},
{
"epoch": 2.227488151658768,
"grad_norm": 17.10760498046875,
"learning_rate": 1.431079894644425e-05,
"loss": 1.4396,
"step": 940
},
{
"epoch": 2.251184834123223,
"grad_norm": 25.413055419921875,
"learning_rate": 1.3871817383669885e-05,
"loss": 1.36,
"step": 950
},
{
"epoch": 2.2748815165876777,
"grad_norm": 25.242835998535156,
"learning_rate": 1.3432835820895523e-05,
"loss": 1.4158,
"step": 960
},
{
"epoch": 2.2985781990521326,
"grad_norm": 13.354720115661621,
"learning_rate": 1.299385425812116e-05,
"loss": 1.2912,
"step": 970
},
{
"epoch": 2.322274881516588,
"grad_norm": 19.005489349365234,
"learning_rate": 1.2554872695346794e-05,
"loss": 1.3714,
"step": 980
},
{
"epoch": 2.345971563981043,
"grad_norm": 14.166816711425781,
"learning_rate": 1.2115891132572432e-05,
"loss": 1.5125,
"step": 990
},
{
"epoch": 2.3696682464454977,
"grad_norm": 15.295882225036621,
"learning_rate": 1.1676909569798069e-05,
"loss": 1.2683,
"step": 1000
},
{
"epoch": 2.3933649289099526,
"grad_norm": 15.587239265441895,
"learning_rate": 1.1237928007023705e-05,
"loss": 1.4069,
"step": 1010
},
{
"epoch": 2.4170616113744074,
"grad_norm": 14.5875883102417,
"learning_rate": 1.0798946444249341e-05,
"loss": 1.2272,
"step": 1020
},
{
"epoch": 2.4407582938388623,
"grad_norm": 18.715368270874023,
"learning_rate": 1.035996488147498e-05,
"loss": 1.2901,
"step": 1030
},
{
"epoch": 2.4644549763033177,
"grad_norm": 16.098590850830078,
"learning_rate": 9.920983318700614e-06,
"loss": 1.3823,
"step": 1040
},
{
"epoch": 2.4881516587677726,
"grad_norm": 13.056846618652344,
"learning_rate": 9.482001755926252e-06,
"loss": 1.377,
"step": 1050
},
{
"epoch": 2.5118483412322274,
"grad_norm": 20.628856658935547,
"learning_rate": 9.043020193151889e-06,
"loss": 1.2071,
"step": 1060
},
{
"epoch": 2.5355450236966823,
"grad_norm": 16.558698654174805,
"learning_rate": 8.604038630377525e-06,
"loss": 1.431,
"step": 1070
},
{
"epoch": 2.5592417061611377,
"grad_norm": 15.537813186645508,
"learning_rate": 8.165057067603161e-06,
"loss": 1.2182,
"step": 1080
},
{
"epoch": 2.5829383886255926,
"grad_norm": 17.30754280090332,
"learning_rate": 7.726075504828798e-06,
"loss": 1.3382,
"step": 1090
},
{
"epoch": 2.6066350710900474,
"grad_norm": 14.758044242858887,
"learning_rate": 7.287093942054433e-06,
"loss": 1.3603,
"step": 1100
},
{
"epoch": 2.6303317535545023,
"grad_norm": 17.097063064575195,
"learning_rate": 6.84811237928007e-06,
"loss": 1.3597,
"step": 1110
},
{
"epoch": 2.654028436018957,
"grad_norm": 15.043681144714355,
"learning_rate": 6.409130816505707e-06,
"loss": 1.2835,
"step": 1120
},
{
"epoch": 2.677725118483412,
"grad_norm": 17.108274459838867,
"learning_rate": 5.970149253731343e-06,
"loss": 1.2077,
"step": 1130
},
{
"epoch": 2.7014218009478674,
"grad_norm": 15.565308570861816,
"learning_rate": 5.53116769095698e-06,
"loss": 1.1986,
"step": 1140
},
{
"epoch": 2.7251184834123223,
"grad_norm": 17.27542495727539,
"learning_rate": 5.092186128182617e-06,
"loss": 1.3716,
"step": 1150
},
{
"epoch": 2.748815165876777,
"grad_norm": 18.529542922973633,
"learning_rate": 4.653204565408253e-06,
"loss": 1.2624,
"step": 1160
},
{
"epoch": 2.772511848341232,
"grad_norm": 13.805538177490234,
"learning_rate": 4.2142230026338894e-06,
"loss": 1.3426,
"step": 1170
},
{
"epoch": 2.7962085308056874,
"grad_norm": 17.240198135375977,
"learning_rate": 3.775241439859526e-06,
"loss": 1.3494,
"step": 1180
},
{
"epoch": 2.8199052132701423,
"grad_norm": 14.451141357421875,
"learning_rate": 3.3362598770851626e-06,
"loss": 1.3952,
"step": 1190
},
{
"epoch": 2.843601895734597,
"grad_norm": 13.656115531921387,
"learning_rate": 2.897278314310799e-06,
"loss": 1.3488,
"step": 1200
},
{
"epoch": 2.867298578199052,
"grad_norm": 13.846714973449707,
"learning_rate": 2.4582967515364357e-06,
"loss": 1.2827,
"step": 1210
},
{
"epoch": 2.890995260663507,
"grad_norm": 22.159263610839844,
"learning_rate": 2.019315188762072e-06,
"loss": 1.434,
"step": 1220
},
{
"epoch": 2.914691943127962,
"grad_norm": 20.175447463989258,
"learning_rate": 1.5803336259877086e-06,
"loss": 1.2202,
"step": 1230
},
{
"epoch": 2.938388625592417,
"grad_norm": 17.73303985595703,
"learning_rate": 1.141352063213345e-06,
"loss": 1.4187,
"step": 1240
},
{
"epoch": 2.962085308056872,
"grad_norm": 17.480005264282227,
"learning_rate": 7.023705004389816e-07,
"loss": 1.2624,
"step": 1250
},
{
"epoch": 2.985781990521327,
"grad_norm": 12.864943504333496,
"learning_rate": 2.633889376646181e-07,
"loss": 1.1605,
"step": 1260
},
{
"epoch": 3.0,
"eval_accuracy": 0.9886666666666667,
"eval_loss": 0.0382872000336647,
"eval_runtime": 36.235,
"eval_samples_per_second": 165.586,
"eval_steps_per_second": 5.188,
"step": 1266
},
{
"epoch": 3.0,
"step": 1266,
"total_flos": 4.027570334613504e+18,
"train_loss": 1.9390247602598363,
"train_runtime": 2519.9488,
"train_samples_per_second": 64.287,
"train_steps_per_second": 0.502
}
],
"logging_steps": 10,
"max_steps": 1266,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4.027570334613504e+18,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}