{
  "best_metric": 0.8866264991906878,
  "best_model_checkpoint": "swin-tiny-patch4-window7-224-finetuned-eurosat/checkpoint-220",
  "epoch": 10.0,
  "eval_steps": 500,
  "global_step": 220,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.45454545454545453,
      "grad_norm": 3.2964470386505127,
      "learning_rate": 2.272727272727273e-05,
      "loss": 0.2322,
      "step": 10
    },
    {
      "epoch": 0.9090909090909091,
      "grad_norm": 2.9043655395507812,
      "learning_rate": 4.545454545454546e-05,
      "loss": 0.2604,
      "step": 20
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.8392282958199357,
      "eval_f1": 0.8332760485475542,
      "eval_loss": 0.2333919107913971,
      "eval_precision": 0.8374611693960878,
      "eval_recall": 0.8392282958199357,
      "eval_runtime": 15.9334,
      "eval_samples_per_second": 19.519,
      "eval_steps_per_second": 0.628,
      "step": 22
    },
    {
      "epoch": 1.3636363636363638,
      "grad_norm": 56.99660873413086,
      "learning_rate": 4.797979797979798e-05,
      "loss": 0.2223,
      "step": 30
    },
    {
      "epoch": 1.8181818181818183,
      "grad_norm": 18.48450469970703,
      "learning_rate": 4.545454545454546e-05,
      "loss": 0.2492,
      "step": 40
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8263665594855305,
      "eval_f1": 0.8232255290144138,
      "eval_loss": 0.2620704472064972,
      "eval_precision": 0.8228686443399579,
      "eval_recall": 0.8263665594855305,
      "eval_runtime": 15.7143,
      "eval_samples_per_second": 19.791,
      "eval_steps_per_second": 0.636,
      "step": 44
    },
    {
      "epoch": 2.2727272727272725,
      "grad_norm": 6.249089241027832,
      "learning_rate": 4.292929292929293e-05,
      "loss": 0.2374,
      "step": 50
    },
    {
      "epoch": 2.7272727272727275,
      "grad_norm": 34.324771881103516,
      "learning_rate": 4.0404040404040405e-05,
      "loss": 0.2373,
      "step": 60
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.8617363344051447,
      "eval_f1": 0.8625935578100142,
      "eval_loss": 0.23337994515895844,
      "eval_precision": 0.8639282703997218,
      "eval_recall": 0.8617363344051447,
      "eval_runtime": 16.0907,
      "eval_samples_per_second": 19.328,
      "eval_steps_per_second": 0.621,
      "step": 66
    },
    {
      "epoch": 3.1818181818181817,
      "grad_norm": 12.280445098876953,
      "learning_rate": 3.787878787878788e-05,
      "loss": 0.2634,
      "step": 70
    },
    {
      "epoch": 3.6363636363636362,
      "grad_norm": 4.722353935241699,
      "learning_rate": 3.535353535353535e-05,
      "loss": 0.2623,
      "step": 80
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.8810289389067524,
      "eval_f1": 0.88319217217326,
      "eval_loss": 0.2036450058221817,
      "eval_precision": 0.8908256857340293,
      "eval_recall": 0.8810289389067524,
      "eval_runtime": 16.3075,
      "eval_samples_per_second": 19.071,
      "eval_steps_per_second": 0.613,
      "step": 88
    },
    {
      "epoch": 4.090909090909091,
      "grad_norm": 11.214787483215332,
      "learning_rate": 3.282828282828283e-05,
      "loss": 0.2246,
      "step": 90
    },
    {
      "epoch": 4.545454545454545,
      "grad_norm": 13.322309494018555,
      "learning_rate": 3.0303030303030306e-05,
      "loss": 0.232,
      "step": 100
    },
    {
      "epoch": 5.0,
      "grad_norm": 8.625347137451172,
      "learning_rate": 2.777777777777778e-05,
      "loss": 0.2378,
      "step": 110
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.819935691318328,
      "eval_f1": 0.8172812345261152,
      "eval_loss": 0.2944244146347046,
      "eval_precision": 0.8165197977946229,
      "eval_recall": 0.819935691318328,
      "eval_runtime": 16.454,
      "eval_samples_per_second": 18.901,
      "eval_steps_per_second": 0.608,
      "step": 110
    },
    {
      "epoch": 5.454545454545454,
      "grad_norm": 5.456847667694092,
      "learning_rate": 2.5252525252525256e-05,
      "loss": 0.2711,
      "step": 120
    },
    {
      "epoch": 5.909090909090909,
      "grad_norm": 11.77868938446045,
      "learning_rate": 2.272727272727273e-05,
      "loss": 0.221,
      "step": 130
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.8681672025723473,
      "eval_f1": 0.8700951269031074,
      "eval_loss": 0.20269730687141418,
      "eval_precision": 0.8751993658233878,
      "eval_recall": 0.8681672025723473,
      "eval_runtime": 15.757,
      "eval_samples_per_second": 19.737,
      "eval_steps_per_second": 0.635,
      "step": 132
    },
    {
      "epoch": 6.363636363636363,
      "grad_norm": 10.365585327148438,
      "learning_rate": 2.0202020202020203e-05,
      "loss": 0.2205,
      "step": 140
    },
    {
      "epoch": 6.818181818181818,
      "grad_norm": 3.4123003482818604,
      "learning_rate": 1.7676767676767676e-05,
      "loss": 0.2339,
      "step": 150
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.8585209003215434,
      "eval_f1": 0.8577414571643612,
      "eval_loss": 0.2290601134300232,
      "eval_precision": 0.8572771006256387,
      "eval_recall": 0.8585209003215434,
      "eval_runtime": 15.7364,
      "eval_samples_per_second": 19.763,
      "eval_steps_per_second": 0.635,
      "step": 154
    },
    {
      "epoch": 7.2727272727272725,
      "grad_norm": 6.866532325744629,
      "learning_rate": 1.5151515151515153e-05,
      "loss": 0.2274,
      "step": 160
    },
    {
      "epoch": 7.7272727272727275,
      "grad_norm": 28.103683471679688,
      "learning_rate": 1.2626262626262628e-05,
      "loss": 0.2215,
      "step": 170
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.8681672025723473,
      "eval_f1": 0.868338573836183,
      "eval_loss": 0.2732490599155426,
      "eval_precision": 0.8685295263564022,
      "eval_recall": 0.8681672025723473,
      "eval_runtime": 17.2464,
      "eval_samples_per_second": 18.033,
      "eval_steps_per_second": 0.58,
      "step": 176
    },
    {
      "epoch": 8.181818181818182,
      "grad_norm": 7.824560642242432,
      "learning_rate": 1.0101010101010101e-05,
      "loss": 0.2202,
      "step": 180
    },
    {
      "epoch": 8.636363636363637,
      "grad_norm": 1.545398473739624,
      "learning_rate": 7.5757575757575764e-06,
      "loss": 0.2162,
      "step": 190
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.8681672025723473,
      "eval_f1": 0.8692844166638126,
      "eval_loss": 0.2260492444038391,
      "eval_precision": 0.8713400647093587,
      "eval_recall": 0.8681672025723473,
      "eval_runtime": 15.7507,
      "eval_samples_per_second": 19.745,
      "eval_steps_per_second": 0.635,
      "step": 198
    },
    {
      "epoch": 9.090909090909092,
      "grad_norm": 2.2911288738250732,
      "learning_rate": 5.050505050505051e-06,
      "loss": 0.2224,
      "step": 200
    },
    {
      "epoch": 9.545454545454545,
      "grad_norm": 3.872638463973999,
      "learning_rate": 2.5252525252525253e-06,
      "loss": 0.2126,
      "step": 210
    },
    {
      "epoch": 10.0,
      "grad_norm": 2.078859567642212,
      "learning_rate": 0.0,
      "loss": 0.2226,
      "step": 220
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.8842443729903537,
      "eval_f1": 0.8866264991906878,
      "eval_loss": 0.20013029873371124,
      "eval_precision": 0.8965561746996403,
      "eval_recall": 0.8842443729903537,
      "eval_runtime": 16.6583,
      "eval_samples_per_second": 18.669,
      "eval_steps_per_second": 0.6,
      "step": 220
    },
    {
      "epoch": 10.0,
      "step": 220,
      "total_flos": 6.954705718242509e+17,
      "train_loss": 0.23402113264257257,
      "train_runtime": 1621.2514,
      "train_samples_per_second": 17.258,
      "train_steps_per_second": 0.136
    }
  ],
  "logging_steps": 10,
  "max_steps": 220,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 6.954705718242509e+17,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}