{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 30.303030303030305,
  "eval_steps": 500,
  "global_step": 1000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "eval_loss": 0.6893180012702942,
      "eval_runtime": 28.4889,
      "eval_samples_per_second": 18.218,
      "eval_steps_per_second": 1.158,
      "step": 33
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.3626828193664551,
      "eval_runtime": 29.737,
      "eval_samples_per_second": 17.453,
      "eval_steps_per_second": 1.11,
      "step": 66
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.21266968548297882,
      "eval_runtime": 28.4545,
      "eval_samples_per_second": 18.24,
      "eval_steps_per_second": 1.16,
      "step": 99
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.1356026828289032,
      "eval_runtime": 28.8969,
      "eval_samples_per_second": 17.96,
      "eval_steps_per_second": 1.142,
      "step": 132
    },
    {
      "epoch": 5.0,
      "eval_loss": 0.09372802078723907,
      "eval_runtime": 42.6352,
      "eval_samples_per_second": 12.173,
      "eval_steps_per_second": 0.774,
      "step": 165
    },
    {
      "epoch": 6.0,
      "eval_loss": 0.06550699472427368,
      "eval_runtime": 30.8112,
      "eval_samples_per_second": 16.845,
      "eval_steps_per_second": 1.071,
      "step": 198
    },
    {
      "epoch": 7.0,
      "eval_loss": 0.04331357032060623,
      "eval_runtime": 138.921,
      "eval_samples_per_second": 3.736,
      "eval_steps_per_second": 0.238,
      "step": 231
    },
    {
      "epoch": 8.0,
      "eval_loss": 0.030439287424087524,
      "eval_runtime": 29.4747,
      "eval_samples_per_second": 17.608,
      "eval_steps_per_second": 1.12,
      "step": 264
    },
    {
      "epoch": 9.0,
      "eval_loss": 0.02328161522746086,
      "eval_runtime": 42.9504,
      "eval_samples_per_second": 12.084,
      "eval_steps_per_second": 0.768,
      "step": 297
    },
    {
      "epoch": 10.0,
      "eval_loss": 0.015175359323620796,
      "eval_runtime": 34.7922,
      "eval_samples_per_second": 14.917,
      "eval_steps_per_second": 0.948,
      "step": 330
    },
    {
      "epoch": 11.0,
      "eval_loss": 0.011942080222070217,
      "eval_runtime": 33.7878,
      "eval_samples_per_second": 15.361,
      "eval_steps_per_second": 0.977,
      "step": 363
    },
    {
      "epoch": 12.0,
      "eval_loss": 0.011739258654415607,
      "eval_runtime": 33.1657,
      "eval_samples_per_second": 15.649,
      "eval_steps_per_second": 0.995,
      "step": 396
    },
    {
      "epoch": 13.0,
      "eval_loss": 0.00850769504904747,
      "eval_runtime": 26.9611,
      "eval_samples_per_second": 19.25,
      "eval_steps_per_second": 1.224,
      "step": 429
    },
    {
      "epoch": 14.0,
      "eval_loss": 0.008818953298032284,
      "eval_runtime": 25.3516,
      "eval_samples_per_second": 20.472,
      "eval_steps_per_second": 1.302,
      "step": 462
    },
    {
      "epoch": 15.0,
      "eval_loss": 0.0071915010921657085,
      "eval_runtime": 29.0211,
      "eval_samples_per_second": 17.884,
      "eval_steps_per_second": 1.137,
      "step": 495
    },
    {
      "epoch": 15.15,
      "learning_rate": 1.2424242424242425e-05,
      "loss": 0.172,
      "step": 500
    },
    {
      "epoch": 16.0,
      "eval_loss": 0.0059341141022741795,
      "eval_runtime": 27.1402,
      "eval_samples_per_second": 19.123,
      "eval_steps_per_second": 1.216,
      "step": 528
    },
    {
      "epoch": 17.0,
      "eval_loss": 0.0058893621899187565,
      "eval_runtime": 65.963,
      "eval_samples_per_second": 7.868,
      "eval_steps_per_second": 0.5,
      "step": 561
    },
    {
      "epoch": 18.0,
      "eval_loss": 0.005850458517670631,
      "eval_runtime": 41.3311,
      "eval_samples_per_second": 12.557,
      "eval_steps_per_second": 0.798,
      "step": 594
    },
    {
      "epoch": 19.0,
      "eval_loss": 0.004295774735510349,
      "eval_runtime": 46.3744,
      "eval_samples_per_second": 11.192,
      "eval_steps_per_second": 0.712,
      "step": 627
    },
    {
      "epoch": 20.0,
      "eval_loss": 0.004227998200803995,
      "eval_runtime": 41.1438,
      "eval_samples_per_second": 12.614,
      "eval_steps_per_second": 0.802,
      "step": 660
    },
    {
      "epoch": 21.0,
      "eval_loss": 0.004431780893355608,
      "eval_runtime": 41.907,
      "eval_samples_per_second": 12.385,
      "eval_steps_per_second": 0.787,
      "step": 693
    },
    {
      "epoch": 22.0,
      "eval_loss": 0.0034626370761543512,
      "eval_runtime": 41.1621,
      "eval_samples_per_second": 12.609,
      "eval_steps_per_second": 0.802,
      "step": 726
    },
    {
      "epoch": 23.0,
      "eval_loss": 0.0033035443630069494,
      "eval_runtime": 44.2482,
      "eval_samples_per_second": 11.729,
      "eval_steps_per_second": 0.746,
      "step": 759
    },
    {
      "epoch": 24.0,
      "eval_loss": 0.003542479360476136,
      "eval_runtime": 42.5796,
      "eval_samples_per_second": 12.189,
      "eval_steps_per_second": 0.775,
      "step": 792
    },
    {
      "epoch": 25.0,
      "eval_loss": 0.00354805332608521,
      "eval_runtime": 40.0541,
      "eval_samples_per_second": 12.957,
      "eval_steps_per_second": 0.824,
      "step": 825
    },
    {
      "epoch": 26.0,
      "eval_loss": 0.002963385311886668,
      "eval_runtime": 41.546,
      "eval_samples_per_second": 12.492,
      "eval_steps_per_second": 0.794,
      "step": 858
    },
    {
      "epoch": 27.0,
      "eval_loss": 0.0030358233489096165,
      "eval_runtime": 48.7828,
      "eval_samples_per_second": 10.639,
      "eval_steps_per_second": 0.676,
      "step": 891
    },
    {
      "epoch": 28.0,
      "eval_loss": 0.002670546527951956,
      "eval_runtime": 40.7799,
      "eval_samples_per_second": 12.727,
      "eval_steps_per_second": 0.809,
      "step": 924
    },
    {
      "epoch": 29.0,
      "eval_loss": 0.002638536971062422,
      "eval_runtime": 69.2191,
      "eval_samples_per_second": 7.498,
      "eval_steps_per_second": 0.477,
      "step": 957
    },
    {
      "epoch": 30.0,
      "eval_loss": 0.002552107907831669,
      "eval_runtime": 47.5572,
      "eval_samples_per_second": 10.913,
      "eval_steps_per_second": 0.694,
      "step": 990
    },
    {
      "epoch": 30.3,
      "learning_rate": 4.848484848484849e-06,
      "loss": 0.0077,
      "step": 1000
    }
  ],
  "logging_steps": 500,
  "max_steps": 1320,
  "num_train_epochs": 40,
  "save_steps": 500,
  "total_flos": 278856790097838.0,
  "trial_name": null,
  "trial_params": null
}