|
{
  "best_metric": 0.9964285714285714,
  "best_model_checkpoint": "vit-large-patch16-224-dungeon-geo-morphs-0-4-29Nov24-002/checkpoint-80",
  "epoch": 32.0,
  "eval_steps": 10,
  "global_step": 80,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 4.0,
      "grad_norm": 30.849088668823242,
      "learning_rate": 9.861111111111112e-06,
      "loss": 1.576,
      "step": 10
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.5803571428571429,
      "eval_loss": 1.2765302658081055,
      "eval_runtime": 7.5512,
      "eval_samples_per_second": 74.161,
      "eval_steps_per_second": 9.27,
      "step": 10
    },
    {
      "epoch": 8.0,
      "grad_norm": 23.055137634277344,
      "learning_rate": 8.472222222222223e-06,
      "loss": 1.0281,
      "step": 20
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.8339285714285715,
      "eval_loss": 0.7735558748245239,
      "eval_runtime": 8.2438,
      "eval_samples_per_second": 67.93,
      "eval_steps_per_second": 8.491,
      "step": 20
    },
    {
      "epoch": 12.0,
      "grad_norm": 20.051189422607422,
      "learning_rate": 7.083333333333335e-06,
      "loss": 0.594,
      "step": 30
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.9196428571428571,
      "eval_loss": 0.42900553345680237,
      "eval_runtime": 7.5773,
      "eval_samples_per_second": 73.905,
      "eval_steps_per_second": 9.238,
      "step": 30
    },
    {
      "epoch": 16.0,
      "grad_norm": 14.912154197692871,
      "learning_rate": 5.694444444444445e-06,
      "loss": 0.3375,
      "step": 40
    },
    {
      "epoch": 16.0,
      "eval_accuracy": 0.9660714285714286,
      "eval_loss": 0.264893114566803,
      "eval_runtime": 8.3101,
      "eval_samples_per_second": 67.388,
      "eval_steps_per_second": 8.424,
      "step": 40
    },
    {
      "epoch": 20.0,
      "grad_norm": 14.604040145874023,
      "learning_rate": 4.305555555555556e-06,
      "loss": 0.2094,
      "step": 50
    },
    {
      "epoch": 20.0,
      "eval_accuracy": 0.9857142857142858,
      "eval_loss": 0.15899096429347992,
      "eval_runtime": 8.5413,
      "eval_samples_per_second": 65.564,
      "eval_steps_per_second": 8.196,
      "step": 50
    },
    {
      "epoch": 24.0,
      "grad_norm": 11.598795890808105,
      "learning_rate": 2.916666666666667e-06,
      "loss": 0.1342,
      "step": 60
    },
    {
      "epoch": 24.0,
      "eval_accuracy": 0.9928571428571429,
      "eval_loss": 0.11229041963815689,
      "eval_runtime": 7.8531,
      "eval_samples_per_second": 71.309,
      "eval_steps_per_second": 8.914,
      "step": 60
    },
    {
      "epoch": 28.0,
      "grad_norm": 7.593981742858887,
      "learning_rate": 1.527777777777778e-06,
      "loss": 0.1041,
      "step": 70
    },
    {
      "epoch": 28.0,
      "eval_accuracy": 0.9928571428571429,
      "eval_loss": 0.09983398020267487,
      "eval_runtime": 8.2646,
      "eval_samples_per_second": 67.758,
      "eval_steps_per_second": 8.47,
      "step": 70
    },
    {
      "epoch": 32.0,
      "grad_norm": 8.414456367492676,
      "learning_rate": 1.3888888888888888e-07,
      "loss": 0.0832,
      "step": 80
    },
    {
      "epoch": 32.0,
      "eval_accuracy": 0.9964285714285714,
      "eval_loss": 0.08847405761480331,
      "eval_runtime": 8.0748,
      "eval_samples_per_second": 69.352,
      "eval_steps_per_second": 8.669,
      "step": 80
    },
    {
      "epoch": 32.0,
      "step": 80,
      "total_flos": 7.012786101918106e+17,
      "train_loss": 0.5083139404654503,
      "train_runtime": 494.5702,
      "train_samples_per_second": 6.47,
      "train_steps_per_second": 0.162
    }
  ],
  "logging_steps": 10,
  "max_steps": 80,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 40,
  "save_steps": 10,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 7.012786101918106e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
|
|