{
  "best_metric": 1.007388710975647,
  "best_model_checkpoint": "/kaggle/output/checkpoint-42000",
  "epoch": 1.7519556714471969,
  "eval_steps": 1000,
  "global_step": 43000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 2.7777777777777777e-11,
      "loss": 1.0722,
      "step": 1
    },
    {
      "epoch": 0.04,
      "learning_rate": 2.7750000000000004e-08,
      "loss": 1.1407,
      "step": 1000
    },
    {
      "epoch": 0.04,
      "eval_accuracy": 0.32914171656686625,
      "eval_loss": 1.1048731803894043,
      "eval_runtime": 57.3781,
      "eval_samples_per_second": 87.316,
      "eval_steps_per_second": 10.928,
      "step": 1000
    },
    {
      "epoch": 0.08,
      "learning_rate": 5.5527777777777784e-08,
      "loss": 1.1253,
      "step": 2000
    },
    {
      "epoch": 0.08,
      "eval_accuracy": 0.3377245508982036,
      "eval_loss": 1.1033366918563843,
      "eval_runtime": 57.3237,
      "eval_samples_per_second": 87.398,
      "eval_steps_per_second": 10.938,
      "step": 2000
    },
    {
      "epoch": 0.12,
      "learning_rate": 8.327777777777778e-08,
      "loss": 1.1179,
      "step": 3000
    },
    {
      "epoch": 0.12,
      "eval_accuracy": 0.3502994011976048,
      "eval_loss": 1.104222059249878,
      "eval_runtime": 57.4527,
      "eval_samples_per_second": 87.202,
      "eval_steps_per_second": 10.913,
      "step": 3000
    },
    {
      "epoch": 0.16,
      "learning_rate": 1.1105555555555557e-07,
      "loss": 1.1197,
      "step": 4000
    },
    {
      "epoch": 0.16,
      "eval_accuracy": 0.3457085828343313,
      "eval_loss": 1.1000728607177734,
      "eval_runtime": 57.5257,
      "eval_samples_per_second": 87.091,
      "eval_steps_per_second": 10.899,
      "step": 4000
    },
    {
      "epoch": 0.2,
      "learning_rate": 1.3883333333333335e-07,
      "loss": 1.1202,
      "step": 5000
    },
    {
      "epoch": 0.2,
      "eval_accuracy": 0.3383233532934132,
      "eval_loss": 1.099381923675537,
      "eval_runtime": 57.825,
      "eval_samples_per_second": 86.641,
      "eval_steps_per_second": 10.843,
      "step": 5000
    },
    {
      "epoch": 0.24,
      "learning_rate": 1.6661111111111112e-07,
      "loss": 1.1166,
      "step": 6000
    },
    {
      "epoch": 0.24,
      "eval_accuracy": 0.35409181636726544,
      "eval_loss": 1.0975825786590576,
      "eval_runtime": 57.6509,
      "eval_samples_per_second": 86.902,
      "eval_steps_per_second": 10.876,
      "step": 6000
    },
    {
      "epoch": 0.29,
      "learning_rate": 1.9436111111111112e-07,
      "loss": 1.1126,
      "step": 7000
    },
    {
      "epoch": 0.29,
      "eval_accuracy": 0.35508982035928144,
      "eval_loss": 1.0957295894622803,
      "eval_runtime": 57.5423,
      "eval_samples_per_second": 87.066,
      "eval_steps_per_second": 10.896,
      "step": 7000
    },
    {
      "epoch": 0.33,
      "learning_rate": 2.2213888888888891e-07,
      "loss": 1.1091,
      "step": 8000
    },
    {
      "epoch": 0.33,
      "eval_accuracy": 0.36666666666666664,
      "eval_loss": 1.093665599822998,
      "eval_runtime": 57.2594,
      "eval_samples_per_second": 87.497,
      "eval_steps_per_second": 10.95,
      "step": 8000
    },
    {
      "epoch": 0.37,
      "learning_rate": 2.4988888888888893e-07,
      "loss": 1.1069,
      "step": 9000
    },
    {
      "epoch": 0.37,
      "eval_accuracy": 0.35129740518962077,
      "eval_loss": 1.1007317304611206,
      "eval_runtime": 57.5122,
      "eval_samples_per_second": 87.112,
      "eval_steps_per_second": 10.902,
      "step": 9000
    },
    {
      "epoch": 0.41,
      "learning_rate": 2.776666666666667e-07,
      "loss": 1.1054,
      "step": 10000
    },
    {
      "epoch": 0.41,
      "eval_accuracy": 0.38143712574850297,
      "eval_loss": 1.092604398727417,
      "eval_runtime": 57.5161,
      "eval_samples_per_second": 87.106,
      "eval_steps_per_second": 10.901,
      "step": 10000
    },
    {
      "epoch": 0.45,
      "learning_rate": 3.0541666666666667e-07,
      "loss": 1.1041,
      "step": 11000
    },
    {
      "epoch": 0.45,
      "eval_accuracy": 0.37704590818363276,
      "eval_loss": 1.0923383235931396,
      "eval_runtime": 57.5839,
      "eval_samples_per_second": 87.003,
      "eval_steps_per_second": 10.888,
      "step": 11000
    },
    {
      "epoch": 0.49,
      "learning_rate": 3.3319444444444444e-07,
      "loss": 1.1047,
      "step": 12000
    },
    {
      "epoch": 0.49,
      "eval_accuracy": 0.3914171656686627,
      "eval_loss": 1.0889469385147095,
      "eval_runtime": 57.877,
      "eval_samples_per_second": 86.563,
      "eval_steps_per_second": 10.833,
      "step": 12000
    },
    {
      "epoch": 0.53,
      "learning_rate": 3.6094444444444446e-07,
      "loss": 1.1015,
      "step": 13000
    },
    {
      "epoch": 0.53,
      "eval_accuracy": 0.3437125748502994,
      "eval_loss": 1.095380187034607,
      "eval_runtime": 57.5214,
      "eval_samples_per_second": 87.098,
      "eval_steps_per_second": 10.9,
      "step": 13000
    },
    {
      "epoch": 0.57,
      "learning_rate": 3.8872222222222223e-07,
      "loss": 1.0997,
      "step": 14000
    },
    {
      "epoch": 0.57,
      "eval_accuracy": 0.3596806387225549,
      "eval_loss": 1.096056580543518,
      "eval_runtime": 57.4747,
      "eval_samples_per_second": 87.169,
      "eval_steps_per_second": 10.909,
      "step": 14000
    },
    {
      "epoch": 0.61,
      "learning_rate": 4.1650000000000006e-07,
      "loss": 1.1022,
      "step": 15000
    },
    {
      "epoch": 0.61,
      "eval_accuracy": 0.38882235528942116,
      "eval_loss": 1.089142084121704,
      "eval_runtime": 57.778,
      "eval_samples_per_second": 86.711,
      "eval_steps_per_second": 10.852,
      "step": 15000
    },
    {
      "epoch": 0.65,
      "learning_rate": 4.4427777777777783e-07,
      "loss": 1.0985,
      "step": 16000
    },
    {
      "epoch": 0.65,
      "eval_accuracy": 0.3930139720558882,
      "eval_loss": 1.0844948291778564,
      "eval_runtime": 57.5695,
      "eval_samples_per_second": 87.025,
      "eval_steps_per_second": 10.891,
      "step": 16000
    },
    {
      "epoch": 0.69,
      "learning_rate": 4.7202777777777785e-07,
      "loss": 1.0966,
      "step": 17000
    },
    {
      "epoch": 0.69,
      "eval_accuracy": 0.40459081836327343,
      "eval_loss": 1.083156704902649,
      "eval_runtime": 57.5224,
      "eval_samples_per_second": 87.096,
      "eval_steps_per_second": 10.9,
      "step": 17000
    },
    {
      "epoch": 0.73,
      "learning_rate": 4.998055555555556e-07,
      "loss": 1.0985,
      "step": 18000
    },
    {
      "epoch": 0.73,
      "eval_accuracy": 0.4055888223552894,
      "eval_loss": 1.0858113765716553,
      "eval_runtime": 57.413,
      "eval_samples_per_second": 87.263,
      "eval_steps_per_second": 10.921,
      "step": 18000
    },
    {
      "epoch": 0.77,
      "learning_rate": 5.275277777777778e-07,
      "loss": 1.0933,
      "step": 19000
    },
    {
      "epoch": 0.77,
      "eval_accuracy": 0.3996007984031936,
      "eval_loss": 1.0827423334121704,
      "eval_runtime": 57.2851,
      "eval_samples_per_second": 87.457,
      "eval_steps_per_second": 10.945,
      "step": 19000
    },
    {
      "epoch": 0.81,
      "learning_rate": 5.553055555555556e-07,
      "loss": 1.094,
      "step": 20000
    },
    {
      "epoch": 0.81,
      "eval_accuracy": 0.4059880239520958,
      "eval_loss": 1.0834325551986694,
      "eval_runtime": 57.4875,
      "eval_samples_per_second": 87.149,
      "eval_steps_per_second": 10.907,
      "step": 20000
    },
    {
      "epoch": 0.86,
      "learning_rate": 5.830555555555556e-07,
      "loss": 1.0924,
      "step": 21000
    },
    {
      "epoch": 0.86,
      "eval_accuracy": 0.3850299401197605,
      "eval_loss": 1.0823787450790405,
      "eval_runtime": 57.0913,
      "eval_samples_per_second": 87.754,
      "eval_steps_per_second": 10.982,
      "step": 21000
    },
    {
      "epoch": 0.9,
      "learning_rate": 6.108333333333333e-07,
      "loss": 1.0894,
      "step": 22000
    },
    {
      "epoch": 0.9,
      "eval_accuracy": 0.4101796407185629,
      "eval_loss": 1.0732710361480713,
      "eval_runtime": 57.1315,
      "eval_samples_per_second": 87.692,
      "eval_steps_per_second": 10.975,
      "step": 22000
    },
    {
      "epoch": 0.94,
      "learning_rate": 6.386111111111112e-07,
      "loss": 1.0812,
      "step": 23000
    },
    {
      "epoch": 0.94,
      "eval_accuracy": 0.43333333333333335,
      "eval_loss": 1.063122034072876,
      "eval_runtime": 57.1173,
      "eval_samples_per_second": 87.714,
      "eval_steps_per_second": 10.977,
      "step": 23000
    },
    {
      "epoch": 0.98,
      "learning_rate": 6.663611111111112e-07,
      "loss": 1.0751,
      "step": 24000
    },
    {
      "epoch": 0.98,
      "eval_accuracy": 0.43972055888223555,
      "eval_loss": 1.055448055267334,
      "eval_runtime": 57.3035,
      "eval_samples_per_second": 87.429,
      "eval_steps_per_second": 10.942,
      "step": 24000
    },
    {
      "epoch": 1.02,
      "learning_rate": 6.94138888888889e-07,
      "loss": 1.0724,
      "step": 25000
    },
    {
      "epoch": 1.02,
      "eval_accuracy": 0.436127744510978,
      "eval_loss": 1.0547080039978027,
      "eval_runtime": 57.4811,
      "eval_samples_per_second": 87.159,
      "eval_steps_per_second": 10.908,
      "step": 25000
    },
    {
      "epoch": 1.06,
      "learning_rate": 7.219166666666666e-07,
      "loss": 1.0682,
      "step": 26000
    },
    {
      "epoch": 1.06,
      "eval_accuracy": 0.4297405189620758,
      "eval_loss": 1.0550919771194458,
      "eval_runtime": 57.4375,
      "eval_samples_per_second": 87.225,
      "eval_steps_per_second": 10.916,
      "step": 26000
    },
    {
      "epoch": 1.1,
      "learning_rate": 7.496666666666667e-07,
      "loss": 1.0675,
      "step": 27000
    },
    {
      "epoch": 1.1,
      "eval_accuracy": 0.43812375249500995,
      "eval_loss": 1.0507142543792725,
      "eval_runtime": 57.1993,
      "eval_samples_per_second": 87.589,
      "eval_steps_per_second": 10.962,
      "step": 27000
    },
    {
      "epoch": 1.14,
      "learning_rate": 7.774444444444445e-07,
      "loss": 1.0642,
      "step": 28000
    },
    {
      "epoch": 1.14,
      "eval_accuracy": 0.4291417165668663,
      "eval_loss": 1.0485388040542603,
      "eval_runtime": 57.1444,
      "eval_samples_per_second": 87.673,
      "eval_steps_per_second": 10.972,
      "step": 28000
    },
    {
      "epoch": 1.18,
      "learning_rate": 8.051944444444445e-07,
      "loss": 1.0593,
      "step": 29000
    },
    {
      "epoch": 1.18,
      "eval_accuracy": 0.4413173652694611,
      "eval_loss": 1.0438047647476196,
      "eval_runtime": 57.4046,
      "eval_samples_per_second": 87.275,
      "eval_steps_per_second": 10.922,
      "step": 29000
    },
    {
      "epoch": 1.22,
      "learning_rate": 8.329722222222223e-07,
      "loss": 1.0601,
      "step": 30000
    },
    {
      "epoch": 1.22,
      "eval_accuracy": 0.43852295409181635,
      "eval_loss": 1.0402766466140747,
      "eval_runtime": 57.3546,
      "eval_samples_per_second": 87.351,
      "eval_steps_per_second": 10.932,
      "step": 30000
    },
    {
      "epoch": 1.26,
      "learning_rate": 8.607222222222223e-07,
      "loss": 1.0634,
      "step": 31000
    },
    {
      "epoch": 1.26,
      "eval_accuracy": 0.4481037924151697,
      "eval_loss": 1.04641854763031,
      "eval_runtime": 57.3392,
      "eval_samples_per_second": 87.375,
      "eval_steps_per_second": 10.935,
      "step": 31000
    },
    {
      "epoch": 1.3,
      "learning_rate": 8.885e-07,
      "loss": 1.0589,
      "step": 32000
    },
    {
      "epoch": 1.3,
      "eval_accuracy": 0.4303393213572854,
      "eval_loss": 1.0385456085205078,
      "eval_runtime": 57.4244,
      "eval_samples_per_second": 87.245,
      "eval_steps_per_second": 10.919,
      "step": 32000
    },
    {
      "epoch": 1.34,
      "learning_rate": 9.162500000000001e-07,
      "loss": 1.0543,
      "step": 33000
    },
    {
      "epoch": 1.34,
      "eval_accuracy": 0.4536926147704591,
      "eval_loss": 1.033170223236084,
      "eval_runtime": 57.3319,
      "eval_samples_per_second": 87.386,
      "eval_steps_per_second": 10.936,
      "step": 33000
    },
    {
      "epoch": 1.39,
      "learning_rate": 9.440277777777779e-07,
      "loss": 1.0481,
      "step": 34000
    },
    {
      "epoch": 1.39,
      "eval_accuracy": 0.4489021956087824,
      "eval_loss": 1.0322452783584595,
      "eval_runtime": 57.6214,
      "eval_samples_per_second": 86.947,
      "eval_steps_per_second": 10.881,
      "step": 34000
    },
    {
      "epoch": 1.43,
      "learning_rate": 9.71777777777778e-07,
      "loss": 1.0525,
      "step": 35000
    },
    {
      "epoch": 1.43,
      "eval_accuracy": 0.45069860279441115,
      "eval_loss": 1.0243780612945557,
      "eval_runtime": 57.4229,
      "eval_samples_per_second": 87.247,
      "eval_steps_per_second": 10.919,
      "step": 35000
    },
    {
      "epoch": 1.47,
      "learning_rate": 9.995555555555557e-07,
      "loss": 1.05,
      "step": 36000
    },
    {
      "epoch": 1.47,
      "eval_accuracy": 0.4598802395209581,
      "eval_loss": 1.020882487297058,
      "eval_runtime": 57.5219,
      "eval_samples_per_second": 87.097,
      "eval_steps_per_second": 10.9,
      "step": 36000
    },
    {
      "epoch": 1.51,
      "learning_rate": 1.0273055555555556e-06,
      "loss": 1.0471,
      "step": 37000
    },
    {
      "epoch": 1.51,
      "eval_accuracy": 0.45189620758483035,
      "eval_loss": 1.0195881128311157,
      "eval_runtime": 57.338,
      "eval_samples_per_second": 87.377,
      "eval_steps_per_second": 10.935,
      "step": 37000
    },
    {
      "epoch": 1.55,
      "learning_rate": 1.0550833333333334e-06,
      "loss": 1.0471,
      "step": 38000
    },
    {
      "epoch": 1.55,
      "eval_accuracy": 0.4532934131736527,
      "eval_loss": 1.0168437957763672,
      "eval_runtime": 57.6557,
      "eval_samples_per_second": 86.895,
      "eval_steps_per_second": 10.875,
      "step": 38000
    },
    {
      "epoch": 1.59,
      "learning_rate": 1.0828611111111111e-06,
      "loss": 1.0467,
      "step": 39000
    },
    {
      "epoch": 1.59,
      "eval_accuracy": 0.4491017964071856,
      "eval_loss": 1.020241379737854,
      "eval_runtime": 57.5714,
      "eval_samples_per_second": 87.022,
      "eval_steps_per_second": 10.891,
      "step": 39000
    },
    {
      "epoch": 1.63,
      "learning_rate": 1.1106111111111112e-06,
      "loss": 1.0378,
      "step": 40000
    },
    {
      "epoch": 1.63,
      "eval_accuracy": 0.455688622754491,
      "eval_loss": 1.0142648220062256,
      "eval_runtime": 57.4804,
      "eval_samples_per_second": 87.16,
      "eval_steps_per_second": 10.908,
      "step": 40000
    },
    {
      "epoch": 1.67,
      "learning_rate": 1.138388888888889e-06,
      "loss": 1.043,
      "step": 41000
    },
    {
      "epoch": 1.67,
      "eval_accuracy": 0.4600798403193613,
      "eval_loss": 1.021972417831421,
      "eval_runtime": 57.3072,
      "eval_samples_per_second": 87.424,
      "eval_steps_per_second": 10.941,
      "step": 41000
    },
    {
      "epoch": 1.71,
      "learning_rate": 1.166138888888889e-06,
      "loss": 1.0358,
      "step": 42000
    },
    {
      "epoch": 1.71,
      "eval_accuracy": 0.4654690618762475,
      "eval_loss": 1.007388710975647,
      "eval_runtime": 57.1318,
      "eval_samples_per_second": 87.692,
      "eval_steps_per_second": 10.975,
      "step": 42000
    },
    {
      "epoch": 1.75,
      "learning_rate": 1.1939166666666668e-06,
      "loss": 1.035,
      "step": 43000
    },
    {
      "epoch": 1.75,
      "eval_accuracy": 0.449500998003992,
      "eval_loss": 1.032967448234558,
      "eval_runtime": 57.4098,
      "eval_samples_per_second": 87.267,
      "eval_steps_per_second": 10.921,
      "step": 43000
    }
  ],
  "logging_steps": 1000,
  "max_steps": 10000000,
  "num_train_epochs": 408,
  "save_steps": 1000,
  "total_flos": 8.98850093114327e+16,
  "trial_name": null,
  "trial_params": null
}