{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 100.0,
  "global_step": 5400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 3.7,
      "learning_rate": 3.9399999999999995e-05,
      "loss": 12.8577,
      "step": 200
    },
    {
      "epoch": 7.41,
      "learning_rate": 7.939999999999999e-05,
      "loss": 3.5841,
      "step": 400
    },
    {
      "epoch": 9.26,
      "eval_loss": 3.2514328956604004,
      "eval_runtime": 14.446,
      "eval_samples_per_second": 24.92,
      "eval_steps_per_second": 3.115,
      "eval_wer": 0.9941199529596236,
      "step": 500
    },
    {
      "epoch": 11.11,
      "learning_rate": 0.0001194,
      "loss": 3.0775,
      "step": 600
    },
    {
      "epoch": 14.81,
      "learning_rate": 0.00015939999999999997,
      "loss": 0.8996,
      "step": 800
    },
    {
      "epoch": 18.52,
      "learning_rate": 0.00019939999999999997,
      "loss": 0.3992,
      "step": 1000
    },
    {
      "epoch": 18.52,
      "eval_loss": 0.8790363073348999,
      "eval_runtime": 14.0346,
      "eval_samples_per_second": 25.651,
      "eval_steps_per_second": 3.206,
      "eval_wer": 0.6107408859270874,
      "step": 1000
    },
    {
      "epoch": 22.22,
      "learning_rate": 0.0002394,
      "loss": 0.2879,
      "step": 1200
    },
    {
      "epoch": 25.93,
      "learning_rate": 0.00027939999999999996,
      "loss": 0.2409,
      "step": 1400
    },
    {
      "epoch": 27.78,
      "eval_loss": 1.0011698007583618,
      "eval_runtime": 14.3849,
      "eval_samples_per_second": 25.026,
      "eval_steps_per_second": 3.128,
      "eval_wer": 0.6366130929047432,
      "step": 1500
    },
    {
      "epoch": 29.63,
      "learning_rate": 0.0002925384615384615,
      "loss": 0.2132,
      "step": 1600
    },
    {
      "epoch": 33.33,
      "learning_rate": 0.00027715384615384614,
      "loss": 0.1729,
      "step": 1800
    },
    {
      "epoch": 37.04,
      "learning_rate": 0.00026176923076923073,
      "loss": 0.1447,
      "step": 2000
    },
    {
      "epoch": 37.04,
      "eval_loss": 1.0167392492294312,
      "eval_runtime": 14.4093,
      "eval_samples_per_second": 24.984,
      "eval_steps_per_second": 3.123,
      "eval_wer": 0.6275970207761662,
      "step": 2000
    },
    {
      "epoch": 40.74,
      "learning_rate": 0.0002463846153846153,
      "loss": 0.1311,
      "step": 2200
    },
    {
      "epoch": 44.44,
      "learning_rate": 0.00023099999999999998,
      "loss": 0.1109,
      "step": 2400
    },
    {
      "epoch": 46.3,
      "eval_loss": 1.0637701749801636,
      "eval_runtime": 13.6702,
      "eval_samples_per_second": 26.335,
      "eval_steps_per_second": 3.292,
      "eval_wer": 0.5652685221481771,
      "step": 2500
    },
    {
      "epoch": 48.15,
      "learning_rate": 0.0002156153846153846,
      "loss": 0.0968,
      "step": 2600
    },
    {
      "epoch": 51.85,
      "learning_rate": 0.00020023076923076922,
      "loss": 0.0865,
      "step": 2800
    },
    {
      "epoch": 55.56,
      "learning_rate": 0.00018484615384615385,
      "loss": 0.0797,
      "step": 3000
    },
    {
      "epoch": 55.56,
      "eval_loss": 1.1447213888168335,
      "eval_runtime": 13.6804,
      "eval_samples_per_second": 26.315,
      "eval_steps_per_second": 3.289,
      "eval_wer": 0.5715405723245786,
      "step": 3000
    },
    {
      "epoch": 59.26,
      "learning_rate": 0.00016946153846153844,
      "loss": 0.0728,
      "step": 3200
    },
    {
      "epoch": 62.96,
      "learning_rate": 0.00015407692307692306,
      "loss": 0.0636,
      "step": 3400
    },
    {
      "epoch": 64.81,
      "eval_loss": 1.1502803564071655,
      "eval_runtime": 14.7332,
      "eval_samples_per_second": 24.435,
      "eval_steps_per_second": 3.054,
      "eval_wer": 0.5315562524500196,
      "step": 3500
    },
    {
      "epoch": 66.67,
      "learning_rate": 0.0001386923076923077,
      "loss": 0.0604,
      "step": 3600
    },
    {
      "epoch": 70.37,
      "learning_rate": 0.0001233076923076923,
      "loss": 0.0499,
      "step": 3800
    },
    {
      "epoch": 74.07,
      "learning_rate": 0.0001079230769230769,
      "loss": 0.0466,
      "step": 4000
    },
    {
      "epoch": 74.07,
      "eval_loss": 1.2227113246917725,
      "eval_runtime": 14.6209,
      "eval_samples_per_second": 24.622,
      "eval_steps_per_second": 3.078,
      "eval_wer": 0.5386123088984712,
      "step": 4000
    },
    {
      "epoch": 77.78,
      "learning_rate": 9.253846153846153e-05,
      "loss": 0.0426,
      "step": 4200
    },
    {
      "epoch": 81.48,
      "learning_rate": 7.715384615384615e-05,
      "loss": 0.0372,
      "step": 4400
    },
    {
      "epoch": 83.33,
      "eval_loss": 1.121378779411316,
      "eval_runtime": 13.6605,
      "eval_samples_per_second": 26.353,
      "eval_steps_per_second": 3.294,
      "eval_wer": 0.5225401803214426,
      "step": 4500
    },
    {
      "epoch": 85.19,
      "learning_rate": 6.176923076923076e-05,
      "loss": 0.0349,
      "step": 4600
    },
    {
      "epoch": 88.89,
      "learning_rate": 4.6384615384615385e-05,
      "loss": 0.029,
      "step": 4800
    },
    {
      "epoch": 92.59,
      "learning_rate": 3.0999999999999995e-05,
      "loss": 0.0239,
      "step": 5000
    },
    {
      "epoch": 92.59,
      "eval_loss": 1.1375247240066528,
      "eval_runtime": 14.3296,
      "eval_samples_per_second": 25.123,
      "eval_steps_per_second": 3.14,
      "eval_wer": 0.4998039984319875,
      "step": 5000
    },
    {
      "epoch": 96.3,
      "learning_rate": 1.5615384615384614e-05,
      "loss": 0.0208,
      "step": 5200
    },
    {
      "epoch": 100.0,
      "learning_rate": 2.3076923076923075e-07,
      "loss": 0.0188,
      "step": 5400
    },
    {
      "epoch": 100.0,
      "step": 5400,
      "total_flos": 1.3559072525710848e+19,
      "train_loss": 0.8475217450106586,
      "train_runtime": 6078.829,
      "train_samples_per_second": 14.082,
      "train_steps_per_second": 0.888
    }
  ],
  "max_steps": 5400,
  "num_train_epochs": 100,
  "total_flos": 1.3559072525710848e+19,
  "trial_name": null,
  "trial_params": null
}