{
  "best_metric": 0.16742652654647827,
  "best_model_checkpoint": "miner_id_24/checkpoint-100",
  "epoch": 0.2403846153846154,
  "eval_steps": 50,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.002403846153846154,
      "grad_norm": 1.1853702068328857,
      "learning_rate": 1e-06,
      "loss": 0.5942,
      "step": 1
    },
    {
      "epoch": 0.002403846153846154,
      "eval_loss": 1.188090205192566,
      "eval_runtime": 12.7629,
      "eval_samples_per_second": 13.79,
      "eval_steps_per_second": 3.447,
      "step": 1
    },
    {
      "epoch": 0.004807692307692308,
      "grad_norm": 1.4127286672592163,
      "learning_rate": 2e-06,
      "loss": 0.7545,
      "step": 2
    },
    {
      "epoch": 0.007211538461538462,
      "grad_norm": 1.4509857892990112,
      "learning_rate": 3e-06,
      "loss": 0.6684,
      "step": 3
    },
    {
      "epoch": 0.009615384615384616,
      "grad_norm": 1.5004510879516602,
      "learning_rate": 4e-06,
      "loss": 0.7764,
      "step": 4
    },
    {
      "epoch": 0.01201923076923077,
      "grad_norm": 1.8388277292251587,
      "learning_rate": 4.9999999999999996e-06,
      "loss": 0.9586,
      "step": 5
    },
    {
      "epoch": 0.014423076923076924,
      "grad_norm": 1.8136910200119019,
      "learning_rate": 6e-06,
      "loss": 0.7699,
      "step": 6
    },
    {
      "epoch": 0.016826923076923076,
      "grad_norm": 1.7008845806121826,
      "learning_rate": 7e-06,
      "loss": 0.6792,
      "step": 7
    },
    {
      "epoch": 0.019230769230769232,
      "grad_norm": 1.8676527738571167,
      "learning_rate": 8e-06,
      "loss": 0.846,
      "step": 8
    },
    {
      "epoch": 0.021634615384615384,
      "grad_norm": 2.0595245361328125,
      "learning_rate": 9e-06,
      "loss": 0.7905,
      "step": 9
    },
    {
      "epoch": 0.02403846153846154,
      "grad_norm": 2.2339797019958496,
      "learning_rate": 9.999999999999999e-06,
      "loss": 0.8244,
      "step": 10
    },
    {
      "epoch": 0.026442307692307692,
      "grad_norm": 1.8361808061599731,
      "learning_rate": 1.1e-05,
      "loss": 0.6951,
      "step": 11
    },
    {
      "epoch": 0.028846153846153848,
      "grad_norm": 2.103513240814209,
      "learning_rate": 1.2e-05,
      "loss": 0.8303,
      "step": 12
    },
    {
      "epoch": 0.03125,
      "grad_norm": 1.8775490522384644,
      "learning_rate": 1.3000000000000001e-05,
      "loss": 0.669,
      "step": 13
    },
    {
      "epoch": 0.03365384615384615,
      "grad_norm": 2.3700971603393555,
      "learning_rate": 1.4e-05,
      "loss": 0.694,
      "step": 14
    },
    {
      "epoch": 0.036057692307692304,
      "grad_norm": 1.9218188524246216,
      "learning_rate": 1.5e-05,
      "loss": 0.5904,
      "step": 15
    },
    {
      "epoch": 0.038461538461538464,
      "grad_norm": 1.9600694179534912,
      "learning_rate": 1.6e-05,
      "loss": 0.8039,
      "step": 16
    },
    {
      "epoch": 0.040865384615384616,
      "grad_norm": 1.5222437381744385,
      "learning_rate": 1.7e-05,
      "loss": 0.3499,
      "step": 17
    },
    {
      "epoch": 0.04326923076923077,
      "grad_norm": 1.8209494352340698,
      "learning_rate": 1.8e-05,
      "loss": 0.5223,
      "step": 18
    },
    {
      "epoch": 0.04567307692307692,
      "grad_norm": 1.699657678604126,
      "learning_rate": 1.9e-05,
      "loss": 0.5394,
      "step": 19
    },
    {
      "epoch": 0.04807692307692308,
      "grad_norm": 1.1377636194229126,
      "learning_rate": 1.9999999999999998e-05,
      "loss": 0.4437,
      "step": 20
    },
    {
      "epoch": 0.05048076923076923,
      "grad_norm": 1.1468883752822876,
      "learning_rate": 2.1e-05,
      "loss": 0.2422,
      "step": 21
    },
    {
      "epoch": 0.052884615384615384,
      "grad_norm": 1.6879323720932007,
      "learning_rate": 2.2e-05,
      "loss": 0.5082,
      "step": 22
    },
    {
      "epoch": 0.055288461538461536,
      "grad_norm": 1.2606359720230103,
      "learning_rate": 2.3000000000000003e-05,
      "loss": 0.2604,
      "step": 23
    },
    {
      "epoch": 0.057692307692307696,
      "grad_norm": 2.3082375526428223,
      "learning_rate": 2.4e-05,
      "loss": 0.384,
      "step": 24
    },
    {
      "epoch": 0.06009615384615385,
      "grad_norm": 1.574620246887207,
      "learning_rate": 2.5e-05,
      "loss": 0.3298,
      "step": 25
    },
    {
      "epoch": 0.0625,
      "grad_norm": 1.626622200012207,
      "learning_rate": 2.6000000000000002e-05,
      "loss": 0.3545,
      "step": 26
    },
    {
      "epoch": 0.06490384615384616,
      "grad_norm": 1.2426509857177734,
      "learning_rate": 2.7000000000000002e-05,
      "loss": 0.2729,
      "step": 27
    },
    {
      "epoch": 0.0673076923076923,
      "grad_norm": 0.9696183800697327,
      "learning_rate": 2.8e-05,
      "loss": 0.1175,
      "step": 28
    },
    {
      "epoch": 0.06971153846153846,
      "grad_norm": 1.4881669282913208,
      "learning_rate": 2.9e-05,
      "loss": 0.3583,
      "step": 29
    },
    {
      "epoch": 0.07211538461538461,
      "grad_norm": 2.0574419498443604,
      "learning_rate": 3e-05,
      "loss": 0.4548,
      "step": 30
    },
    {
      "epoch": 0.07451923076923077,
      "grad_norm": 1.8662818670272827,
      "learning_rate": 2.9984895998119723e-05,
      "loss": 0.4393,
      "step": 31
    },
    {
      "epoch": 0.07692307692307693,
      "grad_norm": 1.9631654024124146,
      "learning_rate": 2.993961440992859e-05,
      "loss": 0.3641,
      "step": 32
    },
    {
      "epoch": 0.07932692307692307,
      "grad_norm": 1.6589587926864624,
      "learning_rate": 2.9864246426519023e-05,
      "loss": 0.2092,
      "step": 33
    },
    {
      "epoch": 0.08173076923076923,
      "grad_norm": 1.5731538534164429,
      "learning_rate": 2.9758943828979444e-05,
      "loss": 0.1979,
      "step": 34
    },
    {
      "epoch": 0.08413461538461539,
      "grad_norm": 2.270122766494751,
      "learning_rate": 2.9623918682727355e-05,
      "loss": 0.2475,
      "step": 35
    },
    {
      "epoch": 0.08653846153846154,
      "grad_norm": 1.6105525493621826,
      "learning_rate": 2.9459442910437798e-05,
      "loss": 0.2663,
      "step": 36
    },
    {
      "epoch": 0.0889423076923077,
      "grad_norm": 1.0379960536956787,
      "learning_rate": 2.9265847744427305e-05,
      "loss": 0.0822,
      "step": 37
    },
    {
      "epoch": 0.09134615384615384,
      "grad_norm": 1.4877510070800781,
      "learning_rate": 2.904352305959606e-05,
      "loss": 0.1749,
      "step": 38
    },
    {
      "epoch": 0.09375,
      "grad_norm": 1.4685450792312622,
      "learning_rate": 2.8792916588271762e-05,
      "loss": 0.176,
      "step": 39
    },
    {
      "epoch": 0.09615384615384616,
      "grad_norm": 0.9629994630813599,
      "learning_rate": 2.8514533018536286e-05,
      "loss": 0.089,
      "step": 40
    },
    {
      "epoch": 0.0985576923076923,
      "grad_norm": 0.7029222846031189,
      "learning_rate": 2.820893297785107e-05,
      "loss": 0.0612,
      "step": 41
    },
    {
      "epoch": 0.10096153846153846,
      "grad_norm": 1.3228390216827393,
      "learning_rate": 2.7876731904027994e-05,
      "loss": 0.0963,
      "step": 42
    },
    {
      "epoch": 0.10336538461538461,
      "grad_norm": 1.0944647789001465,
      "learning_rate": 2.7518598805819542e-05,
      "loss": 0.1305,
      "step": 43
    },
    {
      "epoch": 0.10576923076923077,
      "grad_norm": 0.9398048520088196,
      "learning_rate": 2.7135254915624213e-05,
      "loss": 0.0518,
      "step": 44
    },
    {
      "epoch": 0.10817307692307693,
      "grad_norm": 1.0688811540603638,
      "learning_rate": 2.672747223702045e-05,
      "loss": 0.1383,
      "step": 45
    },
    {
      "epoch": 0.11057692307692307,
      "grad_norm": 1.6178768873214722,
      "learning_rate": 2.6296071990054167e-05,
      "loss": 0.2288,
      "step": 46
    },
    {
      "epoch": 0.11298076923076923,
      "grad_norm": 1.1201609373092651,
      "learning_rate": 2.5841922957410875e-05,
      "loss": 0.045,
      "step": 47
    },
    {
      "epoch": 0.11538461538461539,
      "grad_norm": 0.6004620790481567,
      "learning_rate": 2.5365939734802973e-05,
      "loss": 0.0313,
      "step": 48
    },
    {
      "epoch": 0.11778846153846154,
      "grad_norm": 1.3882133960723877,
      "learning_rate": 2.4869080889095693e-05,
      "loss": 0.1741,
      "step": 49
    },
    {
      "epoch": 0.1201923076923077,
      "grad_norm": 1.3401930332183838,
      "learning_rate": 2.4352347027881003e-05,
      "loss": 0.1004,
      "step": 50
    },
    {
      "epoch": 0.1201923076923077,
      "eval_loss": 0.27240392565727234,
      "eval_runtime": 13.0087,
      "eval_samples_per_second": 13.529,
      "eval_steps_per_second": 3.382,
      "step": 50
    },
    {
      "epoch": 0.12259615384615384,
      "grad_norm": 1.4361435174942017,
      "learning_rate": 2.3816778784387097e-05,
      "loss": 0.3142,
      "step": 51
    },
    {
      "epoch": 0.125,
      "grad_norm": 1.8554904460906982,
      "learning_rate": 2.3263454721781537e-05,
      "loss": 0.3407,
      "step": 52
    },
    {
      "epoch": 0.12740384615384615,
      "grad_norm": 1.6358041763305664,
      "learning_rate": 2.2693489161088592e-05,
      "loss": 0.3693,
      "step": 53
    },
    {
      "epoch": 0.12980769230769232,
      "grad_norm": 1.2670414447784424,
      "learning_rate": 2.210802993709498e-05,
      "loss": 0.2198,
      "step": 54
    },
    {
      "epoch": 0.13221153846153846,
      "grad_norm": 0.6511435508728027,
      "learning_rate": 2.1508256086763372e-05,
      "loss": 0.1036,
      "step": 55
    },
    {
      "epoch": 0.1346153846153846,
      "grad_norm": 0.963407576084137,
      "learning_rate": 2.0895375474808857e-05,
      "loss": 0.2021,
      "step": 56
    },
    {
      "epoch": 0.13701923076923078,
      "grad_norm": 0.9860010743141174,
      "learning_rate": 2.0270622361220143e-05,
      "loss": 0.1904,
      "step": 57
    },
    {
      "epoch": 0.13942307692307693,
      "grad_norm": 0.9964820146560669,
      "learning_rate": 1.963525491562421e-05,
      "loss": 0.2028,
      "step": 58
    },
    {
      "epoch": 0.14182692307692307,
      "grad_norm": 1.0610746145248413,
      "learning_rate": 1.8990552683500128e-05,
      "loss": 0.2599,
      "step": 59
    },
    {
      "epoch": 0.14423076923076922,
      "grad_norm": 1.155051827430725,
      "learning_rate": 1.8337814009344716e-05,
      "loss": 0.2026,
      "step": 60
    },
    {
      "epoch": 0.1466346153846154,
      "grad_norm": 1.2364827394485474,
      "learning_rate": 1.767835342197955e-05,
      "loss": 0.3114,
      "step": 61
    },
    {
      "epoch": 0.14903846153846154,
      "grad_norm": 1.4400686025619507,
      "learning_rate": 1.7013498987264832e-05,
      "loss": 0.2923,
      "step": 62
    },
    {
      "epoch": 0.15144230769230768,
      "grad_norm": 1.1243747472763062,
      "learning_rate": 1.6344589633551502e-05,
      "loss": 0.1879,
      "step": 63
    },
    {
      "epoch": 0.15384615384615385,
      "grad_norm": 1.5310815572738647,
      "learning_rate": 1.5672972455257726e-05,
      "loss": 0.3505,
      "step": 64
    },
    {
      "epoch": 0.15625,
      "grad_norm": 1.378562331199646,
      "learning_rate": 1.5e-05,
      "loss": 0.2591,
      "step": 65
    },
    {
      "epoch": 0.15865384615384615,
      "grad_norm": 1.165916085243225,
      "learning_rate": 1.4327027544742281e-05,
      "loss": 0.1593,
      "step": 66
    },
    {
      "epoch": 0.16105769230769232,
      "grad_norm": 1.6357243061065674,
      "learning_rate": 1.36554103664485e-05,
      "loss": 0.2098,
      "step": 67
    },
    {
      "epoch": 0.16346153846153846,
      "grad_norm": 0.8705613613128662,
      "learning_rate": 1.2986501012735174e-05,
      "loss": 0.2364,
      "step": 68
    },
    {
      "epoch": 0.1658653846153846,
      "grad_norm": 1.1955105066299438,
      "learning_rate": 1.2321646578020452e-05,
      "loss": 0.2471,
      "step": 69
    },
    {
      "epoch": 0.16826923076923078,
      "grad_norm": 0.6993159651756287,
      "learning_rate": 1.1662185990655285e-05,
      "loss": 0.1146,
      "step": 70
    },
    {
      "epoch": 0.17067307692307693,
      "grad_norm": 1.1817651987075806,
      "learning_rate": 1.1009447316499875e-05,
      "loss": 0.2651,
      "step": 71
    },
    {
      "epoch": 0.17307692307692307,
      "grad_norm": 0.9822415709495544,
      "learning_rate": 1.036474508437579e-05,
      "loss": 0.1596,
      "step": 72
    },
    {
      "epoch": 0.17548076923076922,
      "grad_norm": 0.9577003717422485,
      "learning_rate": 9.729377638779859e-06,
      "loss": 0.1591,
      "step": 73
    },
    {
      "epoch": 0.1778846153846154,
      "grad_norm": 1.019766926765442,
      "learning_rate": 9.104624525191147e-06,
      "loss": 0.1723,
      "step": 74
    },
    {
      "epoch": 0.18028846153846154,
      "grad_norm": 0.8245567083358765,
      "learning_rate": 8.491743913236629e-06,
      "loss": 0.1188,
      "step": 75
    },
    {
      "epoch": 0.18269230769230768,
      "grad_norm": 1.1882052421569824,
      "learning_rate": 7.89197006290502e-06,
      "loss": 0.1337,
      "step": 76
    },
    {
      "epoch": 0.18509615384615385,
      "grad_norm": 1.280745506286621,
      "learning_rate": 7.30651083891141e-06,
      "loss": 0.2059,
      "step": 77
    },
    {
      "epoch": 0.1875,
      "grad_norm": 1.2217323780059814,
      "learning_rate": 6.736545278218464e-06,
      "loss": 0.1497,
      "step": 78
    },
    {
      "epoch": 0.18990384615384615,
      "grad_norm": 1.676806926727295,
      "learning_rate": 6.1832212156129045e-06,
      "loss": 0.3201,
      "step": 79
    },
    {
      "epoch": 0.19230769230769232,
      "grad_norm": 0.6855142712593079,
      "learning_rate": 5.647652972118998e-06,
      "loss": 0.0886,
      "step": 80
    },
    {
      "epoch": 0.19471153846153846,
      "grad_norm": 0.7604547142982483,
      "learning_rate": 5.130919110904311e-06,
      "loss": 0.0928,
      "step": 81
    },
    {
      "epoch": 0.1971153846153846,
      "grad_norm": 2.1085703372955322,
      "learning_rate": 4.6340602651970304e-06,
      "loss": 0.3099,
      "step": 82
    },
    {
      "epoch": 0.19951923076923078,
      "grad_norm": 0.9963690042495728,
      "learning_rate": 4.158077042589129e-06,
      "loss": 0.1087,
      "step": 83
    },
    {
      "epoch": 0.20192307692307693,
      "grad_norm": 0.759389340877533,
      "learning_rate": 3.7039280099458373e-06,
      "loss": 0.0829,
      "step": 84
    },
    {
      "epoch": 0.20432692307692307,
      "grad_norm": 1.397197961807251,
      "learning_rate": 3.272527762979553e-06,
      "loss": 0.3221,
      "step": 85
    },
    {
      "epoch": 0.20673076923076922,
      "grad_norm": 1.0370326042175293,
      "learning_rate": 2.86474508437579e-06,
      "loss": 0.1324,
      "step": 86
    },
    {
      "epoch": 0.2091346153846154,
      "grad_norm": 1.5068193674087524,
      "learning_rate": 2.4814011941804603e-06,
      "loss": 0.1953,
      "step": 87
    },
    {
      "epoch": 0.21153846153846154,
      "grad_norm": 1.1959068775177002,
      "learning_rate": 2.1232680959720085e-06,
      "loss": 0.156,
      "step": 88
    },
    {
      "epoch": 0.21394230769230768,
      "grad_norm": 1.044714331626892,
      "learning_rate": 1.79106702214893e-06,
      "loss": 0.1243,
      "step": 89
    },
    {
      "epoch": 0.21634615384615385,
      "grad_norm": 0.5632033348083496,
      "learning_rate": 1.4854669814637145e-06,
      "loss": 0.0374,
      "step": 90
    },
    {
      "epoch": 0.21875,
      "grad_norm": 1.1526877880096436,
      "learning_rate": 1.2070834117282414e-06,
      "loss": 0.1291,
      "step": 91
    },
    {
      "epoch": 0.22115384615384615,
      "grad_norm": 1.084242820739746,
      "learning_rate": 9.56476940403942e-07,
      "loss": 0.1528,
      "step": 92
    },
    {
      "epoch": 0.22355769230769232,
      "grad_norm": 0.7737883925437927,
      "learning_rate": 7.341522555726971e-07,
      "loss": 0.0896,
      "step": 93
    },
    {
      "epoch": 0.22596153846153846,
      "grad_norm": 1.2481120824813843,
      "learning_rate": 5.405570895622014e-07,
      "loss": 0.1249,
      "step": 94
    },
    {
      "epoch": 0.2283653846153846,
      "grad_norm": 0.6681073307991028,
      "learning_rate": 3.760813172726457e-07,
      "loss": 0.0611,
      "step": 95
    },
    {
      "epoch": 0.23076923076923078,
      "grad_norm": 0.6517145037651062,
      "learning_rate": 2.41056171020555e-07,
      "loss": 0.0768,
      "step": 96
    },
    {
      "epoch": 0.23317307692307693,
      "grad_norm": 0.5658121109008789,
      "learning_rate": 1.357535734809795e-07,
      "loss": 0.0474,
      "step": 97
    },
    {
      "epoch": 0.23557692307692307,
      "grad_norm": 0.8237670660018921,
      "learning_rate": 6.038559007141397e-08,
      "loss": 0.1009,
      "step": 98
    },
    {
      "epoch": 0.23798076923076922,
      "grad_norm": 0.4112774729728699,
      "learning_rate": 1.510400188028116e-08,
      "loss": 0.0223,
      "step": 99
    },
    {
      "epoch": 0.2403846153846154,
      "grad_norm": 0.7369557619094849,
      "learning_rate": 0.0,
      "loss": 0.0593,
      "step": 100
    },
    {
      "epoch": 0.2403846153846154,
      "eval_loss": 0.16742652654647827,
      "eval_runtime": 12.9878,
      "eval_samples_per_second": 13.551,
      "eval_steps_per_second": 3.388,
      "step": 100
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.5547217133568e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}