{
  "best_metric": 2.0210015773773193,
  "best_model_checkpoint": "./whisper-LoRA-small-ha/checkpoint-2000",
  "epoch": 14.331210191082803,
  "eval_steps": 250,
  "global_step": 2250,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.1592356687898089,
      "grad_norm": 10.639625549316406,
      "learning_rate": 1.3800000000000002e-05,
      "loss": 5.8923,
      "step": 25
    },
    {
      "epoch": 0.3184713375796178,
      "grad_norm": 3.0570621490478516,
      "learning_rate": 2.88e-05,
      "loss": 5.1217,
      "step": 50
    },
    {
      "epoch": 0.47770700636942676,
      "grad_norm": 1.420814037322998,
      "learning_rate": 3e-05,
      "loss": 4.1046,
      "step": 75
    },
    {
      "epoch": 0.6369426751592356,
      "grad_norm": 1.6677359342575073,
      "learning_rate": 3e-05,
      "loss": 3.5633,
      "step": 100
    },
    {
      "epoch": 0.7961783439490446,
      "grad_norm": 1.1571909189224243,
      "learning_rate": 3e-05,
      "loss": 3.2634,
      "step": 125
    },
    {
      "epoch": 0.9554140127388535,
      "grad_norm": 1.4416701793670654,
      "learning_rate": 3e-05,
      "loss": 3.2152,
      "step": 150
    },
    {
      "epoch": 1.1146496815286624,
      "grad_norm": 1.3764861822128296,
      "learning_rate": 3e-05,
      "loss": 3.1248,
      "step": 175
    },
    {
      "epoch": 1.2738853503184713,
      "grad_norm": 1.537541389465332,
      "learning_rate": 3e-05,
      "loss": 3.0425,
      "step": 200
    },
    {
      "epoch": 1.4331210191082802,
      "grad_norm": 1.239444613456726,
      "learning_rate": 3e-05,
      "loss": 3.0277,
      "step": 225
    },
    {
      "epoch": 1.5923566878980893,
      "grad_norm": 1.4676295518875122,
      "learning_rate": 3e-05,
      "loss": 3.0616,
      "step": 250
    },
    {
      "epoch": 1.5923566878980893,
      "eval_loss": 2.9989981651306152,
      "eval_runtime": 130.5989,
      "eval_samples_per_second": 5.054,
      "eval_steps_per_second": 0.636,
      "step": 250
    },
    {
      "epoch": 1.7515923566878981,
      "grad_norm": 1.5393909215927124,
      "learning_rate": 3e-05,
      "loss": 2.9165,
      "step": 275
    },
    {
      "epoch": 1.910828025477707,
      "grad_norm": 1.4913767576217651,
      "learning_rate": 3e-05,
      "loss": 2.8888,
      "step": 300
    },
    {
      "epoch": 2.070063694267516,
      "grad_norm": 1.8603297472000122,
      "learning_rate": 3e-05,
      "loss": 2.8715,
      "step": 325
    },
    {
      "epoch": 2.229299363057325,
      "grad_norm": 1.5909651517868042,
      "learning_rate": 3e-05,
      "loss": 2.7833,
      "step": 350
    },
    {
      "epoch": 2.388535031847134,
      "grad_norm": 33.26897048950195,
      "learning_rate": 3e-05,
      "loss": 2.6853,
      "step": 375
    },
    {
      "epoch": 2.5477707006369426,
      "grad_norm": 1.984903335571289,
      "learning_rate": 3e-05,
      "loss": 2.7053,
      "step": 400
    },
    {
      "epoch": 2.7070063694267517,
      "grad_norm": 1.6659773588180542,
      "learning_rate": 3e-05,
      "loss": 2.6967,
      "step": 425
    },
    {
      "epoch": 2.8662420382165603,
      "grad_norm": 2.684084415435791,
      "learning_rate": 3e-05,
      "loss": 2.63,
      "step": 450
    },
    {
      "epoch": 3.0254777070063694,
      "grad_norm": 1.7889983654022217,
      "learning_rate": 3e-05,
      "loss": 2.1224,
      "step": 475
    },
    {
      "epoch": 3.1847133757961785,
      "grad_norm": 2.101982831954956,
      "learning_rate": 3e-05,
      "loss": 2.1053,
      "step": 500
    },
    {
      "epoch": 3.1847133757961785,
      "eval_loss": 2.131725311279297,
      "eval_runtime": 131.015,
      "eval_samples_per_second": 5.038,
      "eval_steps_per_second": 0.634,
      "step": 500
    },
    {
      "epoch": 3.343949044585987,
      "grad_norm": 1.9597238302230835,
      "learning_rate": 3e-05,
      "loss": 2.1066,
      "step": 525
    },
    {
      "epoch": 3.5031847133757963,
      "grad_norm": 1.7918522357940674,
      "learning_rate": 3e-05,
      "loss": 2.043,
      "step": 550
    },
    {
      "epoch": 3.662420382165605,
      "grad_norm": 1.6857051849365234,
      "learning_rate": 3e-05,
      "loss": 1.9991,
      "step": 575
    },
    {
      "epoch": 3.821656050955414,
      "grad_norm": 1.8661901950836182,
      "learning_rate": 3e-05,
      "loss": 2.0209,
      "step": 600
    },
    {
      "epoch": 3.980891719745223,
      "grad_norm": 2.1587560176849365,
      "learning_rate": 3e-05,
      "loss": 2.0404,
      "step": 625
    },
    {
      "epoch": 4.140127388535032,
      "grad_norm": 1.7673060894012451,
      "learning_rate": 3e-05,
      "loss": 2.052,
      "step": 650
    },
    {
      "epoch": 4.2993630573248405,
      "grad_norm": 1.8025519847869873,
      "learning_rate": 3e-05,
      "loss": 1.9958,
      "step": 675
    },
    {
      "epoch": 4.45859872611465,
      "grad_norm": 2.0931341648101807,
      "learning_rate": 3e-05,
      "loss": 1.9864,
      "step": 700
    },
    {
      "epoch": 4.617834394904459,
      "grad_norm": 1.7860947847366333,
      "learning_rate": 3e-05,
      "loss": 1.998,
      "step": 725
    },
    {
      "epoch": 4.777070063694268,
      "grad_norm": 2.118542432785034,
      "learning_rate": 3e-05,
      "loss": 2.0123,
      "step": 750
    },
    {
      "epoch": 4.777070063694268,
      "eval_loss": 2.090299606323242,
      "eval_runtime": 131.9058,
      "eval_samples_per_second": 5.004,
      "eval_steps_per_second": 0.629,
      "step": 750
    },
    {
      "epoch": 4.936305732484076,
      "grad_norm": 2.0395212173461914,
      "learning_rate": 3e-05,
      "loss": 2.0432,
      "step": 775
    },
    {
      "epoch": 5.095541401273885,
      "grad_norm": 2.1469058990478516,
      "learning_rate": 3e-05,
      "loss": 1.9556,
      "step": 800
    },
    {
      "epoch": 5.254777070063694,
      "grad_norm": 2.0194575786590576,
      "learning_rate": 3e-05,
      "loss": 1.9874,
      "step": 825
    },
    {
      "epoch": 5.414012738853503,
      "grad_norm": 2.0585408210754395,
      "learning_rate": 3e-05,
      "loss": 1.9637,
      "step": 850
    },
    {
      "epoch": 5.573248407643312,
      "grad_norm": 1.9950058460235596,
      "learning_rate": 3e-05,
      "loss": 1.9731,
      "step": 875
    },
    {
      "epoch": 5.732484076433121,
      "grad_norm": 2.102336883544922,
      "learning_rate": 3e-05,
      "loss": 1.9443,
      "step": 900
    },
    {
      "epoch": 5.89171974522293,
      "grad_norm": 1.905841588973999,
      "learning_rate": 3e-05,
      "loss": 1.9753,
      "step": 925
    },
    {
      "epoch": 6.050955414012739,
      "grad_norm": 2.1166951656341553,
      "learning_rate": 3e-05,
      "loss": 1.9176,
      "step": 950
    },
    {
      "epoch": 6.210191082802548,
      "grad_norm": 2.2197279930114746,
      "learning_rate": 3e-05,
      "loss": 1.8839,
      "step": 975
    },
    {
      "epoch": 6.369426751592357,
      "grad_norm": 2.578101873397827,
      "learning_rate": 3e-05,
      "loss": 1.9218,
      "step": 1000
    },
    {
      "epoch": 6.369426751592357,
      "eval_loss": 2.0661463737487793,
      "eval_runtime": 131.3996,
      "eval_samples_per_second": 5.023,
      "eval_steps_per_second": 0.632,
      "step": 1000
    },
    {
      "epoch": 6.528662420382165,
      "grad_norm": 2.2647619247436523,
      "learning_rate": 3e-05,
      "loss": 1.9609,
      "step": 1025
    },
    {
      "epoch": 6.687898089171974,
      "grad_norm": 2.4119601249694824,
      "learning_rate": 3e-05,
      "loss": 1.961,
      "step": 1050
    },
    {
      "epoch": 6.8471337579617835,
      "grad_norm": 2.380333662033081,
      "learning_rate": 3e-05,
      "loss": 1.9162,
      "step": 1075
    },
    {
      "epoch": 7.006369426751593,
      "grad_norm": 2.429640293121338,
      "learning_rate": 3e-05,
      "loss": 1.9468,
      "step": 1100
    },
    {
      "epoch": 7.165605095541402,
      "grad_norm": 2.362521171569824,
      "learning_rate": 3e-05,
      "loss": 1.9068,
      "step": 1125
    },
    {
      "epoch": 7.32484076433121,
      "grad_norm": 2.479595899581909,
      "learning_rate": 3e-05,
      "loss": 1.908,
      "step": 1150
    },
    {
      "epoch": 7.484076433121019,
      "grad_norm": 2.53534197807312,
      "learning_rate": 3e-05,
      "loss": 1.8769,
      "step": 1175
    },
    {
      "epoch": 7.643312101910828,
      "grad_norm": 2.207059144973755,
      "learning_rate": 3e-05,
      "loss": 1.8427,
      "step": 1200
    },
    {
      "epoch": 7.802547770700637,
      "grad_norm": 2.4016318321228027,
      "learning_rate": 3e-05,
      "loss": 1.9285,
      "step": 1225
    },
    {
      "epoch": 7.961783439490446,
      "grad_norm": 2.836793899536133,
      "learning_rate": 3e-05,
      "loss": 1.894,
      "step": 1250
    },
    {
      "epoch": 7.961783439490446,
      "eval_loss": 2.0512537956237793,
      "eval_runtime": 131.0055,
      "eval_samples_per_second": 5.038,
      "eval_steps_per_second": 0.634,
      "step": 1250
    },
    {
      "epoch": 8.121019108280255,
      "grad_norm": 2.402125358581543,
      "learning_rate": 3e-05,
      "loss": 1.8639,
      "step": 1275
    },
    {
      "epoch": 8.280254777070065,
      "grad_norm": 2.61692214012146,
      "learning_rate": 3e-05,
      "loss": 1.8427,
      "step": 1300
    },
    {
      "epoch": 8.439490445859873,
      "grad_norm": 2.6917548179626465,
      "learning_rate": 3e-05,
      "loss": 1.8541,
      "step": 1325
    },
    {
      "epoch": 8.598726114649681,
      "grad_norm": 3.4319992065429688,
      "learning_rate": 3e-05,
      "loss": 1.8926,
      "step": 1350
    },
    {
      "epoch": 8.757961783439491,
      "grad_norm": 2.572566032409668,
      "learning_rate": 3e-05,
      "loss": 1.7997,
      "step": 1375
    },
    {
      "epoch": 8.9171974522293,
      "grad_norm": 2.649146795272827,
      "learning_rate": 3e-05,
      "loss": 1.8534,
      "step": 1400
    },
    {
      "epoch": 9.07643312101911,
      "grad_norm": 2.9375128746032715,
      "learning_rate": 3e-05,
      "loss": 1.8279,
      "step": 1425
    },
    {
      "epoch": 9.235668789808917,
      "grad_norm": 2.6020314693450928,
      "learning_rate": 3e-05,
      "loss": 1.9072,
      "step": 1450
    },
    {
      "epoch": 9.394904458598726,
      "grad_norm": 2.7787463665008545,
      "learning_rate": 3e-05,
      "loss": 1.8113,
      "step": 1475
    },
    {
      "epoch": 9.554140127388536,
      "grad_norm": 2.8579261302948,
      "learning_rate": 3e-05,
      "loss": 1.7961,
      "step": 1500
    },
    {
      "epoch": 9.554140127388536,
      "eval_loss": 2.034783363342285,
      "eval_runtime": 131.7061,
      "eval_samples_per_second": 5.011,
      "eval_steps_per_second": 0.63,
      "step": 1500
    },
    {
      "epoch": 9.713375796178344,
      "grad_norm": 2.863452911376953,
      "learning_rate": 3e-05,
      "loss": 1.7903,
      "step": 1525
    },
    {
      "epoch": 9.872611464968152,
      "grad_norm": 3.1351847648620605,
      "learning_rate": 3e-05,
      "loss": 1.7899,
      "step": 1550
    },
    {
      "epoch": 10.031847133757962,
      "grad_norm": 2.9801595211029053,
      "learning_rate": 3e-05,
      "loss": 1.8365,
      "step": 1575
    },
    {
      "epoch": 10.19108280254777,
      "grad_norm": 2.9951841831207275,
      "learning_rate": 3e-05,
      "loss": 1.7694,
      "step": 1600
    },
    {
      "epoch": 10.35031847133758,
      "grad_norm": 3.2015936374664307,
      "learning_rate": 3e-05,
      "loss": 1.7946,
      "step": 1625
    },
    {
      "epoch": 10.509554140127388,
      "grad_norm": 2.942176103591919,
      "learning_rate": 3e-05,
      "loss": 1.7126,
      "step": 1650
    },
    {
      "epoch": 10.668789808917197,
      "grad_norm": 3.4911484718322754,
      "learning_rate": 3e-05,
      "loss": 1.7643,
      "step": 1675
    },
    {
      "epoch": 10.828025477707007,
      "grad_norm": 3.2129969596862793,
      "learning_rate": 3e-05,
      "loss": 1.8453,
      "step": 1700
    },
    {
      "epoch": 10.987261146496815,
      "grad_norm": 2.9529011249542236,
      "learning_rate": 3e-05,
      "loss": 1.7673,
      "step": 1725
    },
    {
      "epoch": 11.146496815286625,
      "grad_norm": 2.8384742736816406,
      "learning_rate": 3e-05,
      "loss": 1.764,
      "step": 1750
    },
    {
      "epoch": 11.146496815286625,
      "eval_loss": 2.0300416946411133,
      "eval_runtime": 132.059,
      "eval_samples_per_second": 4.998,
      "eval_steps_per_second": 0.629,
      "step": 1750
    },
    {
      "epoch": 11.305732484076433,
      "grad_norm": 3.514625072479248,
      "learning_rate": 3e-05,
      "loss": 1.7332,
      "step": 1775
    },
    {
      "epoch": 11.464968152866241,
      "grad_norm": 3.1534969806671143,
      "learning_rate": 3e-05,
      "loss": 1.8226,
      "step": 1800
    },
    {
      "epoch": 11.624203821656051,
      "grad_norm": 3.1041505336761475,
      "learning_rate": 3e-05,
      "loss": 1.7624,
      "step": 1825
    },
    {
      "epoch": 11.78343949044586,
      "grad_norm": 3.0769126415252686,
      "learning_rate": 3e-05,
      "loss": 1.7711,
      "step": 1850
    },
    {
      "epoch": 11.94267515923567,
      "grad_norm": 3.060081958770752,
      "learning_rate": 3e-05,
      "loss": 1.6919,
      "step": 1875
    },
    {
      "epoch": 12.101910828025478,
      "grad_norm": 3.662893056869507,
      "learning_rate": 3e-05,
      "loss": 1.7149,
      "step": 1900
    },
    {
      "epoch": 12.261146496815286,
      "grad_norm": 3.292971134185791,
      "learning_rate": 3e-05,
      "loss": 1.6796,
      "step": 1925
    },
    {
      "epoch": 12.420382165605096,
      "grad_norm": 3.3103177547454834,
      "learning_rate": 3e-05,
      "loss": 1.7545,
      "step": 1950
    },
    {
      "epoch": 12.579617834394904,
      "grad_norm": 3.5080769062042236,
      "learning_rate": 3e-05,
      "loss": 1.7648,
      "step": 1975
    },
    {
      "epoch": 12.738853503184714,
      "grad_norm": 3.3745033740997314,
      "learning_rate": 3e-05,
      "loss": 1.748,
      "step": 2000
    },
    {
      "epoch": 12.738853503184714,
      "eval_loss": 2.0210015773773193,
      "eval_runtime": 131.5955,
      "eval_samples_per_second": 5.015,
      "eval_steps_per_second": 0.631,
      "step": 2000
    },
    {
      "epoch": 12.898089171974522,
      "grad_norm": 3.3616280555725098,
      "learning_rate": 3e-05,
      "loss": 1.6712,
      "step": 2025
    },
    {
      "epoch": 13.05732484076433,
      "grad_norm": 3.6867527961730957,
      "learning_rate": 3e-05,
      "loss": 1.6881,
      "step": 2050
    },
    {
      "epoch": 13.21656050955414,
      "grad_norm": 3.1726772785186768,
      "learning_rate": 3e-05,
      "loss": 1.6976,
      "step": 2075
    },
    {
      "epoch": 13.375796178343949,
      "grad_norm": 3.53255558013916,
      "learning_rate": 3e-05,
      "loss": 1.7178,
      "step": 2100
    },
    {
      "epoch": 13.535031847133759,
      "grad_norm": 3.7484970092773438,
      "learning_rate": 3e-05,
      "loss": 1.6847,
      "step": 2125
    },
    {
      "epoch": 13.694267515923567,
      "grad_norm": 3.8939905166625977,
      "learning_rate": 3e-05,
      "loss": 1.6349,
      "step": 2150
    },
    {
      "epoch": 13.853503184713375,
      "grad_norm": 3.3592910766601562,
      "learning_rate": 3e-05,
      "loss": 1.7276,
      "step": 2175
    },
    {
      "epoch": 14.012738853503185,
      "grad_norm": 3.772204875946045,
      "learning_rate": 3e-05,
      "loss": 1.6284,
      "step": 2200
    },
    {
      "epoch": 14.171974522292993,
      "grad_norm": 4.016607761383057,
      "learning_rate": 3e-05,
      "loss": 1.6573,
      "step": 2225
    },
    {
      "epoch": 14.331210191082803,
      "grad_norm": 3.5693182945251465,
      "learning_rate": 3e-05,
      "loss": 1.6554,
      "step": 2250
    },
    {
      "epoch": 14.331210191082803,
      "eval_loss": 2.0353081226348877,
      "eval_runtime": 133.6329,
      "eval_samples_per_second": 4.939,
      "eval_steps_per_second": 0.621,
      "step": 2250
    }
  ],
  "logging_steps": 25,
  "max_steps": 3140,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 250,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.055608710561792e+19,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}