{
"best_metric": 0.8470824949698189,
"best_model_checkpoint": "PhoWhisper-small-vispeech-classifier-v4/checkpoint-490",
"epoch": 1.0,
"eval_steps": 500,
"global_step": 490,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01020408163265306,
"grad_norm": 56193.578125,
"learning_rate": 2.5510204081632652e-08,
"loss": 2.0798,
"step": 5
},
{
"epoch": 0.02040816326530612,
"grad_norm": 50520.33203125,
"learning_rate": 5.1020408163265303e-08,
"loss": 2.0772,
"step": 10
},
{
"epoch": 0.030612244897959183,
"grad_norm": 47583.30859375,
"learning_rate": 7.653061224489796e-08,
"loss": 2.0762,
"step": 15
},
{
"epoch": 0.04081632653061224,
"grad_norm": 53174.3828125,
"learning_rate": 1.0204081632653061e-07,
"loss": 2.0754,
"step": 20
},
{
"epoch": 0.05102040816326531,
"grad_norm": 65972.65625,
"learning_rate": 1.2755102040816328e-07,
"loss": 2.0755,
"step": 25
},
{
"epoch": 0.061224489795918366,
"grad_norm": 53920.14453125,
"learning_rate": 1.5306122448979592e-07,
"loss": 2.0745,
"step": 30
},
{
"epoch": 0.07142857142857142,
"grad_norm": 52966.375,
"learning_rate": 1.7857142857142858e-07,
"loss": 2.0734,
"step": 35
},
{
"epoch": 0.08163265306122448,
"grad_norm": 60549.94921875,
"learning_rate": 2.0408163265306121e-07,
"loss": 2.0778,
"step": 40
},
{
"epoch": 0.09183673469387756,
"grad_norm": 55827.60546875,
"learning_rate": 2.295918367346939e-07,
"loss": 2.0787,
"step": 45
},
{
"epoch": 0.10204081632653061,
"grad_norm": 51334.3359375,
"learning_rate": 2.5510204081632656e-07,
"loss": 2.0759,
"step": 50
},
{
"epoch": 0.11224489795918367,
"grad_norm": 70249.5078125,
"learning_rate": 2.806122448979592e-07,
"loss": 2.0698,
"step": 55
},
{
"epoch": 0.12244897959183673,
"grad_norm": 54763.26171875,
"learning_rate": 3.0612244897959183e-07,
"loss": 2.0762,
"step": 60
},
{
"epoch": 0.1326530612244898,
"grad_norm": 58244.69921875,
"learning_rate": 3.3163265306122455e-07,
"loss": 2.0696,
"step": 65
},
{
"epoch": 0.14285714285714285,
"grad_norm": 51894.46484375,
"learning_rate": 3.5714285714285716e-07,
"loss": 2.0702,
"step": 70
},
{
"epoch": 0.15306122448979592,
"grad_norm": 72845.734375,
"learning_rate": 3.826530612244898e-07,
"loss": 2.0676,
"step": 75
},
{
"epoch": 0.16326530612244897,
"grad_norm": 56673.85546875,
"learning_rate": 4.0816326530612243e-07,
"loss": 2.0693,
"step": 80
},
{
"epoch": 0.17346938775510204,
"grad_norm": 61739.0546875,
"learning_rate": 4.3367346938775514e-07,
"loss": 2.0668,
"step": 85
},
{
"epoch": 0.1836734693877551,
"grad_norm": 68551.390625,
"learning_rate": 4.591836734693878e-07,
"loss": 2.0651,
"step": 90
},
{
"epoch": 0.19387755102040816,
"grad_norm": 55835.78125,
"learning_rate": 4.846938775510205e-07,
"loss": 2.0701,
"step": 95
},
{
"epoch": 0.20408163265306123,
"grad_norm": 60520.2734375,
"learning_rate": 5.102040816326531e-07,
"loss": 2.0654,
"step": 100
},
{
"epoch": 0.21428571428571427,
"grad_norm": 45642.94921875,
"learning_rate": 5.357142857142857e-07,
"loss": 2.0672,
"step": 105
},
{
"epoch": 0.22448979591836735,
"grad_norm": 67603.8515625,
"learning_rate": 5.612244897959184e-07,
"loss": 2.0673,
"step": 110
},
{
"epoch": 0.23469387755102042,
"grad_norm": 54359.4375,
"learning_rate": 5.867346938775511e-07,
"loss": 2.0604,
"step": 115
},
{
"epoch": 0.24489795918367346,
"grad_norm": 81289.5390625,
"learning_rate": 6.122448979591837e-07,
"loss": 2.061,
"step": 120
},
{
"epoch": 0.25510204081632654,
"grad_norm": 70781.5703125,
"learning_rate": 6.377551020408164e-07,
"loss": 2.0573,
"step": 125
},
{
"epoch": 0.2653061224489796,
"grad_norm": 73178.1875,
"learning_rate": 6.632653061224491e-07,
"loss": 2.0541,
"step": 130
},
{
"epoch": 0.2755102040816326,
"grad_norm": 69632.25,
"learning_rate": 6.887755102040817e-07,
"loss": 2.0603,
"step": 135
},
{
"epoch": 0.2857142857142857,
"grad_norm": 54642.47265625,
"learning_rate": 7.142857142857143e-07,
"loss": 2.0587,
"step": 140
},
{
"epoch": 0.29591836734693877,
"grad_norm": 64884.2890625,
"learning_rate": 7.39795918367347e-07,
"loss": 2.0574,
"step": 145
},
{
"epoch": 0.30612244897959184,
"grad_norm": 74709.9609375,
"learning_rate": 7.653061224489796e-07,
"loss": 2.0466,
"step": 150
},
{
"epoch": 0.3163265306122449,
"grad_norm": 59361.31640625,
"learning_rate": 7.908163265306124e-07,
"loss": 2.0473,
"step": 155
},
{
"epoch": 0.32653061224489793,
"grad_norm": 76060.171875,
"learning_rate": 8.163265306122449e-07,
"loss": 2.0413,
"step": 160
},
{
"epoch": 0.336734693877551,
"grad_norm": 52821.26953125,
"learning_rate": 8.418367346938776e-07,
"loss": 2.0495,
"step": 165
},
{
"epoch": 0.3469387755102041,
"grad_norm": 55647.62109375,
"learning_rate": 8.673469387755103e-07,
"loss": 2.0361,
"step": 170
},
{
"epoch": 0.35714285714285715,
"grad_norm": 49648.828125,
"learning_rate": 8.928571428571429e-07,
"loss": 2.0395,
"step": 175
},
{
"epoch": 0.3673469387755102,
"grad_norm": 63527.9140625,
"learning_rate": 9.183673469387756e-07,
"loss": 2.0334,
"step": 180
},
{
"epoch": 0.37755102040816324,
"grad_norm": 80417.828125,
"learning_rate": 9.438775510204082e-07,
"loss": 2.0331,
"step": 185
},
{
"epoch": 0.3877551020408163,
"grad_norm": 82782.7265625,
"learning_rate": 9.69387755102041e-07,
"loss": 2.0251,
"step": 190
},
{
"epoch": 0.3979591836734694,
"grad_norm": 66964.8125,
"learning_rate": 9.948979591836735e-07,
"loss": 2.0289,
"step": 195
},
{
"epoch": 0.40816326530612246,
"grad_norm": 73848.046875,
"learning_rate": 1.0204081632653063e-06,
"loss": 2.0259,
"step": 200
},
{
"epoch": 0.41836734693877553,
"grad_norm": 70241.171875,
"learning_rate": 1.0459183673469388e-06,
"loss": 2.0082,
"step": 205
},
{
"epoch": 0.42857142857142855,
"grad_norm": 63360.8125,
"learning_rate": 1.0714285714285714e-06,
"loss": 2.0091,
"step": 210
},
{
"epoch": 0.4387755102040816,
"grad_norm": 82052.796875,
"learning_rate": 1.0969387755102041e-06,
"loss": 2.0008,
"step": 215
},
{
"epoch": 0.4489795918367347,
"grad_norm": 63768.30078125,
"learning_rate": 1.122448979591837e-06,
"loss": 1.9958,
"step": 220
},
{
"epoch": 0.45918367346938777,
"grad_norm": 90230.640625,
"learning_rate": 1.1479591836734695e-06,
"loss": 1.9816,
"step": 225
},
{
"epoch": 0.46938775510204084,
"grad_norm": 48253.55078125,
"learning_rate": 1.1734693877551022e-06,
"loss": 2.0064,
"step": 230
},
{
"epoch": 0.47959183673469385,
"grad_norm": 73846.0859375,
"learning_rate": 1.1989795918367348e-06,
"loss": 1.996,
"step": 235
},
{
"epoch": 0.4897959183673469,
"grad_norm": 86008.8046875,
"learning_rate": 1.2244897959183673e-06,
"loss": 1.9858,
"step": 240
},
{
"epoch": 0.5,
"grad_norm": 86437.734375,
"learning_rate": 1.25e-06,
"loss": 1.9696,
"step": 245
},
{
"epoch": 0.5102040816326531,
"grad_norm": 82227.921875,
"learning_rate": 1.2755102040816329e-06,
"loss": 1.9803,
"step": 250
},
{
"epoch": 0.5204081632653061,
"grad_norm": 96575.4765625,
"learning_rate": 1.3010204081632654e-06,
"loss": 1.9375,
"step": 255
},
{
"epoch": 0.5306122448979592,
"grad_norm": 89167.7265625,
"learning_rate": 1.3265306122448982e-06,
"loss": 1.9585,
"step": 260
},
{
"epoch": 0.5408163265306123,
"grad_norm": 101032.5,
"learning_rate": 1.3520408163265307e-06,
"loss": 1.947,
"step": 265
},
{
"epoch": 0.5510204081632653,
"grad_norm": 105424.25,
"learning_rate": 1.3775510204081633e-06,
"loss": 1.9482,
"step": 270
},
{
"epoch": 0.5612244897959183,
"grad_norm": 84136.484375,
"learning_rate": 1.4030612244897959e-06,
"loss": 1.9386,
"step": 275
},
{
"epoch": 0.5714285714285714,
"grad_norm": 83034.078125,
"learning_rate": 1.4285714285714286e-06,
"loss": 1.9245,
"step": 280
},
{
"epoch": 0.5816326530612245,
"grad_norm": 89200.4609375,
"learning_rate": 1.4540816326530614e-06,
"loss": 1.9138,
"step": 285
},
{
"epoch": 0.5918367346938775,
"grad_norm": 103667.9609375,
"learning_rate": 1.479591836734694e-06,
"loss": 1.9283,
"step": 290
},
{
"epoch": 0.6020408163265306,
"grad_norm": 98443.2734375,
"learning_rate": 1.5051020408163267e-06,
"loss": 1.9025,
"step": 295
},
{
"epoch": 0.6122448979591837,
"grad_norm": 99525.0,
"learning_rate": 1.5306122448979593e-06,
"loss": 1.8933,
"step": 300
},
{
"epoch": 0.6224489795918368,
"grad_norm": 145139.5625,
"learning_rate": 1.556122448979592e-06,
"loss": 1.8563,
"step": 305
},
{
"epoch": 0.6326530612244898,
"grad_norm": 116158.859375,
"learning_rate": 1.5816326530612248e-06,
"loss": 1.8931,
"step": 310
},
{
"epoch": 0.6428571428571429,
"grad_norm": 131590.09375,
"learning_rate": 1.6071428571428574e-06,
"loss": 1.8599,
"step": 315
},
{
"epoch": 0.6530612244897959,
"grad_norm": 112483.40625,
"learning_rate": 1.6326530612244897e-06,
"loss": 1.8596,
"step": 320
},
{
"epoch": 0.6632653061224489,
"grad_norm": 82979.0859375,
"learning_rate": 1.6581632653061225e-06,
"loss": 1.883,
"step": 325
},
{
"epoch": 0.673469387755102,
"grad_norm": 96548.5703125,
"learning_rate": 1.6836734693877552e-06,
"loss": 1.8586,
"step": 330
},
{
"epoch": 0.6836734693877551,
"grad_norm": 113625.171875,
"learning_rate": 1.7091836734693878e-06,
"loss": 1.8375,
"step": 335
},
{
"epoch": 0.6938775510204082,
"grad_norm": 115079.0078125,
"learning_rate": 1.7346938775510206e-06,
"loss": 1.838,
"step": 340
},
{
"epoch": 0.7040816326530612,
"grad_norm": 139868.21875,
"learning_rate": 1.7602040816326531e-06,
"loss": 1.8013,
"step": 345
},
{
"epoch": 0.7142857142857143,
"grad_norm": 126350.546875,
"learning_rate": 1.7857142857142859e-06,
"loss": 1.847,
"step": 350
},
{
"epoch": 0.7244897959183674,
"grad_norm": 149690.375,
"learning_rate": 1.8112244897959187e-06,
"loss": 1.8203,
"step": 355
},
{
"epoch": 0.7346938775510204,
"grad_norm": 132708.578125,
"learning_rate": 1.8367346938775512e-06,
"loss": 1.7931,
"step": 360
},
{
"epoch": 0.7448979591836735,
"grad_norm": 128203.3203125,
"learning_rate": 1.862244897959184e-06,
"loss": 1.7533,
"step": 365
},
{
"epoch": 0.7551020408163265,
"grad_norm": 118367.4765625,
"learning_rate": 1.8877551020408163e-06,
"loss": 1.7662,
"step": 370
},
{
"epoch": 0.7653061224489796,
"grad_norm": 162099.25,
"learning_rate": 1.913265306122449e-06,
"loss": 1.7775,
"step": 375
},
{
"epoch": 0.7755102040816326,
"grad_norm": 132369.234375,
"learning_rate": 1.938775510204082e-06,
"loss": 1.7908,
"step": 380
},
{
"epoch": 0.7857142857142857,
"grad_norm": 161412.09375,
"learning_rate": 1.9642857142857144e-06,
"loss": 1.7045,
"step": 385
},
{
"epoch": 0.7959183673469388,
"grad_norm": 117275.65625,
"learning_rate": 1.989795918367347e-06,
"loss": 1.74,
"step": 390
},
{
"epoch": 0.8061224489795918,
"grad_norm": 134175.890625,
"learning_rate": 2.01530612244898e-06,
"loss": 1.7041,
"step": 395
},
{
"epoch": 0.8163265306122449,
"grad_norm": 165093.34375,
"learning_rate": 2.0408163265306125e-06,
"loss": 1.7217,
"step": 400
},
{
"epoch": 0.826530612244898,
"grad_norm": 108558.96875,
"learning_rate": 2.066326530612245e-06,
"loss": 1.7211,
"step": 405
},
{
"epoch": 0.8367346938775511,
"grad_norm": 159761.328125,
"learning_rate": 2.0918367346938776e-06,
"loss": 1.7056,
"step": 410
},
{
"epoch": 0.8469387755102041,
"grad_norm": 131097.640625,
"learning_rate": 2.1173469387755106e-06,
"loss": 1.7289,
"step": 415
},
{
"epoch": 0.8571428571428571,
"grad_norm": 162492.6875,
"learning_rate": 2.1428571428571427e-06,
"loss": 1.7312,
"step": 420
},
{
"epoch": 0.8673469387755102,
"grad_norm": 122069.125,
"learning_rate": 2.1683673469387757e-06,
"loss": 1.6696,
"step": 425
},
{
"epoch": 0.8775510204081632,
"grad_norm": 194332.5625,
"learning_rate": 2.1938775510204083e-06,
"loss": 1.6226,
"step": 430
},
{
"epoch": 0.8877551020408163,
"grad_norm": 132711.28125,
"learning_rate": 2.219387755102041e-06,
"loss": 1.663,
"step": 435
},
{
"epoch": 0.8979591836734694,
"grad_norm": 166239.375,
"learning_rate": 2.244897959183674e-06,
"loss": 1.6127,
"step": 440
},
{
"epoch": 0.9081632653061225,
"grad_norm": 132459.359375,
"learning_rate": 2.2704081632653064e-06,
"loss": 1.6042,
"step": 445
},
{
"epoch": 0.9183673469387755,
"grad_norm": 114693.3046875,
"learning_rate": 2.295918367346939e-06,
"loss": 1.6276,
"step": 450
},
{
"epoch": 0.9285714285714286,
"grad_norm": 184974.53125,
"learning_rate": 2.321428571428572e-06,
"loss": 1.6448,
"step": 455
},
{
"epoch": 0.9387755102040817,
"grad_norm": 134113.625,
"learning_rate": 2.3469387755102044e-06,
"loss": 1.6329,
"step": 460
},
{
"epoch": 0.9489795918367347,
"grad_norm": 137895.78125,
"learning_rate": 2.372448979591837e-06,
"loss": 1.5441,
"step": 465
},
{
"epoch": 0.9591836734693877,
"grad_norm": 158595.234375,
"learning_rate": 2.3979591836734696e-06,
"loss": 1.5524,
"step": 470
},
{
"epoch": 0.9693877551020408,
"grad_norm": 162135.703125,
"learning_rate": 2.423469387755102e-06,
"loss": 1.5796,
"step": 475
},
{
"epoch": 0.9795918367346939,
"grad_norm": 205519.296875,
"learning_rate": 2.4489795918367347e-06,
"loss": 1.5343,
"step": 480
},
{
"epoch": 0.9897959183673469,
"grad_norm": 193819.75,
"learning_rate": 2.4744897959183676e-06,
"loss": 1.5776,
"step": 485
},
{
"epoch": 1.0,
"grad_norm": 225175.609375,
"learning_rate": 2.5e-06,
"loss": 1.5384,
"step": 490
},
{
"epoch": 1.0,
"eval_accuracy": 0.8470824949698189,
"eval_loss": 1.4336239099502563,
"eval_runtime": 214.7827,
"eval_samples_per_second": 13.884,
"eval_steps_per_second": 0.582,
"step": 490
}
],
"logging_steps": 5,
"max_steps": 4900,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.49679069336576e+18,
"train_batch_size": 24,
"trial_name": null,
"trial_params": null
}