|
{
  "best_metric": 2.090299606323242,
  "best_model_checkpoint": "./whisper-LoRA-small-ha/checkpoint-750",
  "epoch": 4.777070063694268,
  "eval_steps": 250,
  "global_step": 750,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.1592356687898089,
      "grad_norm": 10.639625549316406,
      "learning_rate": 1.3800000000000002e-05,
      "loss": 5.8923,
      "step": 25
    },
    {
      "epoch": 0.3184713375796178,
      "grad_norm": 3.0570621490478516,
      "learning_rate": 2.88e-05,
      "loss": 5.1217,
      "step": 50
    },
    {
      "epoch": 0.47770700636942676,
      "grad_norm": 1.420814037322998,
      "learning_rate": 3e-05,
      "loss": 4.1046,
      "step": 75
    },
    {
      "epoch": 0.6369426751592356,
      "grad_norm": 1.6677359342575073,
      "learning_rate": 3e-05,
      "loss": 3.5633,
      "step": 100
    },
    {
      "epoch": 0.7961783439490446,
      "grad_norm": 1.1571909189224243,
      "learning_rate": 3e-05,
      "loss": 3.2634,
      "step": 125
    },
    {
      "epoch": 0.9554140127388535,
      "grad_norm": 1.4416701793670654,
      "learning_rate": 3e-05,
      "loss": 3.2152,
      "step": 150
    },
    {
      "epoch": 1.1146496815286624,
      "grad_norm": 1.3764861822128296,
      "learning_rate": 3e-05,
      "loss": 3.1248,
      "step": 175
    },
    {
      "epoch": 1.2738853503184713,
      "grad_norm": 1.537541389465332,
      "learning_rate": 3e-05,
      "loss": 3.0425,
      "step": 200
    },
    {
      "epoch": 1.4331210191082802,
      "grad_norm": 1.239444613456726,
      "learning_rate": 3e-05,
      "loss": 3.0277,
      "step": 225
    },
    {
      "epoch": 1.5923566878980893,
      "grad_norm": 1.4676295518875122,
      "learning_rate": 3e-05,
      "loss": 3.0616,
      "step": 250
    },
    {
      "epoch": 1.5923566878980893,
      "eval_loss": 2.9989981651306152,
      "eval_runtime": 130.5989,
      "eval_samples_per_second": 5.054,
      "eval_steps_per_second": 0.636,
      "step": 250
    },
    {
      "epoch": 1.7515923566878981,
      "grad_norm": 1.5393909215927124,
      "learning_rate": 3e-05,
      "loss": 2.9165,
      "step": 275
    },
    {
      "epoch": 1.910828025477707,
      "grad_norm": 1.4913767576217651,
      "learning_rate": 3e-05,
      "loss": 2.8888,
      "step": 300
    },
    {
      "epoch": 2.070063694267516,
      "grad_norm": 1.8603297472000122,
      "learning_rate": 3e-05,
      "loss": 2.8715,
      "step": 325
    },
    {
      "epoch": 2.229299363057325,
      "grad_norm": 1.5909651517868042,
      "learning_rate": 3e-05,
      "loss": 2.7833,
      "step": 350
    },
    {
      "epoch": 2.388535031847134,
      "grad_norm": 33.26897048950195,
      "learning_rate": 3e-05,
      "loss": 2.6853,
      "step": 375
    },
    {
      "epoch": 2.5477707006369426,
      "grad_norm": 1.984903335571289,
      "learning_rate": 3e-05,
      "loss": 2.7053,
      "step": 400
    },
    {
      "epoch": 2.7070063694267517,
      "grad_norm": 1.6659773588180542,
      "learning_rate": 3e-05,
      "loss": 2.6967,
      "step": 425
    },
    {
      "epoch": 2.8662420382165603,
      "grad_norm": 2.684084415435791,
      "learning_rate": 3e-05,
      "loss": 2.63,
      "step": 450
    },
    {
      "epoch": 3.0254777070063694,
      "grad_norm": 1.7889983654022217,
      "learning_rate": 3e-05,
      "loss": 2.1224,
      "step": 475
    },
    {
      "epoch": 3.1847133757961785,
      "grad_norm": 2.101982831954956,
      "learning_rate": 3e-05,
      "loss": 2.1053,
      "step": 500
    },
    {
      "epoch": 3.1847133757961785,
      "eval_loss": 2.131725311279297,
      "eval_runtime": 131.015,
      "eval_samples_per_second": 5.038,
      "eval_steps_per_second": 0.634,
      "step": 500
    },
    {
      "epoch": 3.343949044585987,
      "grad_norm": 1.9597238302230835,
      "learning_rate": 3e-05,
      "loss": 2.1066,
      "step": 525
    },
    {
      "epoch": 3.5031847133757963,
      "grad_norm": 1.7918522357940674,
      "learning_rate": 3e-05,
      "loss": 2.043,
      "step": 550
    },
    {
      "epoch": 3.662420382165605,
      "grad_norm": 1.6857051849365234,
      "learning_rate": 3e-05,
      "loss": 1.9991,
      "step": 575
    },
    {
      "epoch": 3.821656050955414,
      "grad_norm": 1.8661901950836182,
      "learning_rate": 3e-05,
      "loss": 2.0209,
      "step": 600
    },
    {
      "epoch": 3.980891719745223,
      "grad_norm": 2.1587560176849365,
      "learning_rate": 3e-05,
      "loss": 2.0404,
      "step": 625
    },
    {
      "epoch": 4.140127388535032,
      "grad_norm": 1.7673060894012451,
      "learning_rate": 3e-05,
      "loss": 2.052,
      "step": 650
    },
    {
      "epoch": 4.2993630573248405,
      "grad_norm": 1.8025519847869873,
      "learning_rate": 3e-05,
      "loss": 1.9958,
      "step": 675
    },
    {
      "epoch": 4.45859872611465,
      "grad_norm": 2.0931341648101807,
      "learning_rate": 3e-05,
      "loss": 1.9864,
      "step": 700
    },
    {
      "epoch": 4.617834394904459,
      "grad_norm": 1.7860947847366333,
      "learning_rate": 3e-05,
      "loss": 1.998,
      "step": 725
    },
    {
      "epoch": 4.777070063694268,
      "grad_norm": 2.118542432785034,
      "learning_rate": 3e-05,
      "loss": 2.0123,
      "step": 750
    },
    {
      "epoch": 4.777070063694268,
      "eval_loss": 2.090299606323242,
      "eval_runtime": 131.9058,
      "eval_samples_per_second": 5.004,
      "eval_steps_per_second": 0.629,
      "step": 750
    }
  ],
  "logging_steps": 25,
  "max_steps": 3140,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 250,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.51947885248512e+18,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}
|
|