{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.08526360665056132,
  "eval_steps": 25,
  "global_step": 75,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.001136848088674151,
      "grad_norm": 13.681441307067871,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 19.3455,
      "step": 1
    },
    {
      "epoch": 0.001136848088674151,
      "eval_loss": 2.1432127952575684,
      "eval_runtime": 38.1789,
      "eval_samples_per_second": 19.409,
      "eval_steps_per_second": 9.717,
      "step": 1
    },
    {
      "epoch": 0.002273696177348302,
      "grad_norm": 8.84878158569336,
      "learning_rate": 6.666666666666667e-05,
      "loss": 15.1115,
      "step": 2
    },
    {
      "epoch": 0.003410544266022453,
      "grad_norm": 9.986258506774902,
      "learning_rate": 0.0001,
      "loss": 18.6546,
      "step": 3
    },
    {
      "epoch": 0.004547392354696604,
      "grad_norm": 9.296355247497559,
      "learning_rate": 9.99524110790929e-05,
      "loss": 15.2052,
      "step": 4
    },
    {
      "epoch": 0.005684240443370754,
      "grad_norm": 8.245156288146973,
      "learning_rate": 9.980973490458728e-05,
      "loss": 14.842,
      "step": 5
    },
    {
      "epoch": 0.006821088532044906,
      "grad_norm": 8.676321029663086,
      "learning_rate": 9.957224306869053e-05,
      "loss": 15.0336,
      "step": 6
    },
    {
      "epoch": 0.007957936620719057,
      "grad_norm": 10.448345184326172,
      "learning_rate": 9.924038765061042e-05,
      "loss": 12.581,
      "step": 7
    },
    {
      "epoch": 0.009094784709393207,
      "grad_norm": 8.366747856140137,
      "learning_rate": 9.881480035599667e-05,
      "loss": 12.6146,
      "step": 8
    },
    {
      "epoch": 0.010231632798067358,
      "grad_norm": 6.977192401885986,
      "learning_rate": 9.829629131445342e-05,
      "loss": 15.2944,
      "step": 9
    },
    {
      "epoch": 0.011368480886741509,
      "grad_norm": 8.217976570129395,
      "learning_rate": 9.768584753741134e-05,
      "loss": 15.2756,
      "step": 10
    },
    {
      "epoch": 0.01250532897541566,
      "grad_norm": 9.124979019165039,
      "learning_rate": 9.698463103929542e-05,
      "loss": 11.9569,
      "step": 11
    },
    {
      "epoch": 0.013642177064089812,
      "grad_norm": 8.218945503234863,
      "learning_rate": 9.619397662556435e-05,
      "loss": 13.2507,
      "step": 12
    },
    {
      "epoch": 0.014779025152763962,
      "grad_norm": 9.404376029968262,
      "learning_rate": 9.53153893518325e-05,
      "loss": 17.7727,
      "step": 13
    },
    {
      "epoch": 0.015915873241438113,
      "grad_norm": 8.809803009033203,
      "learning_rate": 9.435054165891109e-05,
      "loss": 15.4318,
      "step": 14
    },
    {
      "epoch": 0.017052721330112264,
      "grad_norm": 14.043539047241211,
      "learning_rate": 9.330127018922194e-05,
      "loss": 20.6555,
      "step": 15
    },
    {
      "epoch": 0.018189569418786414,
      "grad_norm": 10.740511894226074,
      "learning_rate": 9.21695722906443e-05,
      "loss": 13.5507,
      "step": 16
    },
    {
      "epoch": 0.019326417507460565,
      "grad_norm": 8.008563041687012,
      "learning_rate": 9.09576022144496e-05,
      "loss": 15.5895,
      "step": 17
    },
    {
      "epoch": 0.020463265596134716,
      "grad_norm": 12.661676406860352,
      "learning_rate": 8.966766701456177e-05,
      "loss": 15.2206,
      "step": 18
    },
    {
      "epoch": 0.021600113684808867,
      "grad_norm": 7.788751125335693,
      "learning_rate": 8.83022221559489e-05,
      "loss": 15.9487,
      "step": 19
    },
    {
      "epoch": 0.022736961773483017,
      "grad_norm": 8.766595840454102,
      "learning_rate": 8.68638668405062e-05,
      "loss": 18.9548,
      "step": 20
    },
    {
      "epoch": 0.023873809862157168,
      "grad_norm": 7.193807601928711,
      "learning_rate": 8.535533905932738e-05,
      "loss": 17.7629,
      "step": 21
    },
    {
      "epoch": 0.02501065795083132,
      "grad_norm": 7.72016716003418,
      "learning_rate": 8.377951038078302e-05,
      "loss": 17.2494,
      "step": 22
    },
    {
      "epoch": 0.026147506039505473,
      "grad_norm": 7.836511135101318,
      "learning_rate": 8.213938048432697e-05,
      "loss": 17.594,
      "step": 23
    },
    {
      "epoch": 0.027284354128179623,
      "grad_norm": 8.430022239685059,
      "learning_rate": 8.043807145043604e-05,
      "loss": 17.5101,
      "step": 24
    },
    {
      "epoch": 0.028421202216853774,
      "grad_norm": 7.842438220977783,
      "learning_rate": 7.86788218175523e-05,
      "loss": 14.5361,
      "step": 25
    },
    {
      "epoch": 0.028421202216853774,
      "eval_loss": 1.724392056465149,
      "eval_runtime": 38.1608,
      "eval_samples_per_second": 19.418,
      "eval_steps_per_second": 9.722,
      "step": 25
    },
    {
      "epoch": 0.029558050305527925,
      "grad_norm": 7.136037826538086,
      "learning_rate": 7.68649804173412e-05,
      "loss": 17.5904,
      "step": 26
    },
    {
      "epoch": 0.030694898394202075,
      "grad_norm": 8.120686531066895,
      "learning_rate": 7.500000000000001e-05,
      "loss": 15.6992,
      "step": 27
    },
    {
      "epoch": 0.031831746482876226,
      "grad_norm": 8.6294527053833,
      "learning_rate": 7.308743066175172e-05,
      "loss": 16.7682,
      "step": 28
    },
    {
      "epoch": 0.03296859457155037,
      "grad_norm": 8.904825210571289,
      "learning_rate": 7.113091308703498e-05,
      "loss": 18.3941,
      "step": 29
    },
    {
      "epoch": 0.03410544266022453,
      "grad_norm": 8.436539649963379,
      "learning_rate": 6.91341716182545e-05,
      "loss": 15.865,
      "step": 30
    },
    {
      "epoch": 0.03524229074889868,
      "grad_norm": 8.646078109741211,
      "learning_rate": 6.710100716628344e-05,
      "loss": 17.4423,
      "step": 31
    },
    {
      "epoch": 0.03637913883757283,
      "grad_norm": 8.919268608093262,
      "learning_rate": 6.503528997521366e-05,
      "loss": 14.9074,
      "step": 32
    },
    {
      "epoch": 0.03751598692624698,
      "grad_norm": 8.72852897644043,
      "learning_rate": 6.294095225512603e-05,
      "loss": 16.8692,
      "step": 33
    },
    {
      "epoch": 0.03865283501492113,
      "grad_norm": 8.736539840698242,
      "learning_rate": 6.0821980696905146e-05,
      "loss": 11.9083,
      "step": 34
    },
    {
      "epoch": 0.039789683103595284,
      "grad_norm": 9.58365535736084,
      "learning_rate": 5.868240888334653e-05,
      "loss": 13.4663,
      "step": 35
    },
    {
      "epoch": 0.04092653119226943,
      "grad_norm": 10.65027141571045,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 13.1416,
      "step": 36
    },
    {
      "epoch": 0.042063379280943586,
      "grad_norm": 9.007448196411133,
      "learning_rate": 5.435778713738292e-05,
      "loss": 16.3553,
      "step": 37
    },
    {
      "epoch": 0.04320022736961773,
      "grad_norm": 9.968982696533203,
      "learning_rate": 5.218096936826681e-05,
      "loss": 13.711,
      "step": 38
    },
    {
      "epoch": 0.04433707545829189,
      "grad_norm": 11.036783218383789,
      "learning_rate": 5e-05,
      "loss": 14.8751,
      "step": 39
    },
    {
      "epoch": 0.045473923546966034,
      "grad_norm": 9.898478507995605,
      "learning_rate": 4.781903063173321e-05,
      "loss": 11.9162,
      "step": 40
    },
    {
      "epoch": 0.04661077163564019,
      "grad_norm": 10.537850379943848,
      "learning_rate": 4.564221286261709e-05,
      "loss": 14.4648,
      "step": 41
    },
    {
      "epoch": 0.047747619724314336,
      "grad_norm": 10.226863861083984,
      "learning_rate": 4.347369038899744e-05,
      "loss": 10.943,
      "step": 42
    },
    {
      "epoch": 0.04888446781298849,
      "grad_norm": 13.692229270935059,
      "learning_rate": 4.131759111665349e-05,
      "loss": 7.654,
      "step": 43
    },
    {
      "epoch": 0.05002131590166264,
      "grad_norm": 10.681045532226562,
      "learning_rate": 3.917801930309486e-05,
      "loss": 10.2153,
      "step": 44
    },
    {
      "epoch": 0.05115816399033679,
      "grad_norm": 12.138427734375,
      "learning_rate": 3.705904774487396e-05,
      "loss": 7.9938,
      "step": 45
    },
    {
      "epoch": 0.052295012079010945,
      "grad_norm": 13.93451976776123,
      "learning_rate": 3.4964710024786354e-05,
      "loss": 6.4668,
      "step": 46
    },
    {
      "epoch": 0.05343186016768509,
      "grad_norm": 11.52592658996582,
      "learning_rate": 3.289899283371657e-05,
      "loss": 7.7896,
      "step": 47
    },
    {
      "epoch": 0.05456870825635925,
      "grad_norm": 14.795572280883789,
      "learning_rate": 3.086582838174551e-05,
      "loss": 7.502,
      "step": 48
    },
    {
      "epoch": 0.055705556345033394,
      "grad_norm": 13.952139854431152,
      "learning_rate": 2.886908691296504e-05,
      "loss": 6.6685,
      "step": 49
    },
    {
      "epoch": 0.05684240443370755,
      "grad_norm": 16.22222900390625,
      "learning_rate": 2.6912569338248315e-05,
      "loss": 5.4531,
      "step": 50
    },
    {
      "epoch": 0.05684240443370755,
      "eval_loss": 1.7854145765304565,
      "eval_runtime": 38.1943,
      "eval_samples_per_second": 19.401,
      "eval_steps_per_second": 9.714,
      "step": 50
    },
    {
      "epoch": 0.057979252522381695,
      "grad_norm": 31.600948333740234,
      "learning_rate": 2.500000000000001e-05,
      "loss": 20.401,
      "step": 51
    },
    {
      "epoch": 0.05911610061105585,
      "grad_norm": 23.095199584960938,
      "learning_rate": 2.3135019582658802e-05,
      "loss": 16.3515,
      "step": 52
    },
    {
      "epoch": 0.06025294869973,
      "grad_norm": 19.465106964111328,
      "learning_rate": 2.132117818244771e-05,
      "loss": 11.0436,
      "step": 53
    },
    {
      "epoch": 0.06138979678840415,
      "grad_norm": 24.496065139770508,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 15.2372,
      "step": 54
    },
    {
      "epoch": 0.0625266448770783,
      "grad_norm": 19.609771728515625,
      "learning_rate": 1.7860619515673033e-05,
      "loss": 12.5162,
      "step": 55
    },
    {
      "epoch": 0.06366349296575245,
      "grad_norm": 15.845856666564941,
      "learning_rate": 1.622048961921699e-05,
      "loss": 15.2344,
      "step": 56
    },
    {
      "epoch": 0.0648003410544266,
      "grad_norm": 19.62835693359375,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 16.5892,
      "step": 57
    },
    {
      "epoch": 0.06593718914310075,
      "grad_norm": 14.156807899475098,
      "learning_rate": 1.3136133159493802e-05,
      "loss": 13.7909,
      "step": 58
    },
    {
      "epoch": 0.06707403723177491,
      "grad_norm": 16.480527877807617,
      "learning_rate": 1.1697777844051105e-05,
      "loss": 11.8879,
      "step": 59
    },
    {
      "epoch": 0.06821088532044906,
      "grad_norm": 11.38010311126709,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 13.3756,
      "step": 60
    },
    {
      "epoch": 0.0693477334091232,
      "grad_norm": 10.232706069946289,
      "learning_rate": 9.042397785550405e-06,
      "loss": 15.2605,
      "step": 61
    },
    {
      "epoch": 0.07048458149779736,
      "grad_norm": 11.701848983764648,
      "learning_rate": 7.830427709355725e-06,
      "loss": 19.6999,
      "step": 62
    },
    {
      "epoch": 0.07162142958647151,
      "grad_norm": 11.662482261657715,
      "learning_rate": 6.698729810778065e-06,
      "loss": 18.2158,
      "step": 63
    },
    {
      "epoch": 0.07275827767514566,
      "grad_norm": 11.12790584564209,
      "learning_rate": 5.649458341088915e-06,
      "loss": 17.7721,
      "step": 64
    },
    {
      "epoch": 0.0738951257638198,
      "grad_norm": 8.93850040435791,
      "learning_rate": 4.684610648167503e-06,
      "loss": 16.637,
      "step": 65
    },
    {
      "epoch": 0.07503197385249397,
      "grad_norm": 9.515388488769531,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 15.8094,
      "step": 66
    },
    {
      "epoch": 0.07616882194116811,
      "grad_norm": 10.64154052734375,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 15.4739,
      "step": 67
    },
    {
      "epoch": 0.07730567002984226,
      "grad_norm": 11.352025032043457,
      "learning_rate": 2.314152462588659e-06,
      "loss": 18.6491,
      "step": 68
    },
    {
      "epoch": 0.07844251811851641,
      "grad_norm": 10.24422550201416,
      "learning_rate": 1.70370868554659e-06,
      "loss": 17.2373,
      "step": 69
    },
    {
      "epoch": 0.07957936620719057,
      "grad_norm": 9.423773765563965,
      "learning_rate": 1.1851996440033319e-06,
      "loss": 15.38,
      "step": 70
    },
    {
      "epoch": 0.08071621429586472,
      "grad_norm": 9.517950057983398,
      "learning_rate": 7.596123493895991e-07,
      "loss": 12.7865,
      "step": 71
    },
    {
      "epoch": 0.08185306238453886,
      "grad_norm": 9.021355628967285,
      "learning_rate": 4.277569313094809e-07,
      "loss": 17.7134,
      "step": 72
    },
    {
      "epoch": 0.08298991047321301,
      "grad_norm": 9.259770393371582,
      "learning_rate": 1.9026509541272275e-07,
      "loss": 17.2819,
      "step": 73
    },
    {
      "epoch": 0.08412675856188717,
      "grad_norm": 11.53695011138916,
      "learning_rate": 4.7588920907110094e-08,
      "loss": 15.3022,
      "step": 74
    },
    {
      "epoch": 0.08526360665056132,
      "grad_norm": 8.795744895935059,
      "learning_rate": 0.0,
      "loss": 17.66,
      "step": 75
    },
    {
      "epoch": 0.08526360665056132,
      "eval_loss": 1.7146401405334473,
      "eval_runtime": 38.162,
      "eval_samples_per_second": 19.417,
      "eval_steps_per_second": 9.722,
      "step": 75
    }
  ],
  "logging_steps": 1,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4666552800509952.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}