{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 50.0,
  "global_step": 2700,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.85,
      "learning_rate": 4.814814814814815e-05,
      "loss": 2.6107,
      "step": 100
    },
    {
      "epoch": 3.7,
      "learning_rate": 4.62962962962963e-05,
      "loss": 1.8688,
      "step": 200
    },
    {
      "epoch": 5.56,
      "learning_rate": 4.4444444444444447e-05,
      "loss": 1.5544,
      "step": 300
    },
    {
      "epoch": 7.41,
      "learning_rate": 4.259259259259259e-05,
      "loss": 1.3367,
      "step": 400
    },
    {
      "epoch": 9.26,
      "learning_rate": 4.074074074074074e-05,
      "loss": 1.2102,
      "step": 500
    },
    {
      "epoch": 11.11,
      "learning_rate": 3.888888888888889e-05,
      "loss": 1.1085,
      "step": 600
    },
    {
      "epoch": 12.96,
      "learning_rate": 3.7037037037037037e-05,
      "loss": 1.0013,
      "step": 700
    },
    {
      "epoch": 14.81,
      "learning_rate": 3.518518518518519e-05,
      "loss": 0.9601,
      "step": 800
    },
    {
      "epoch": 16.67,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.9108,
      "step": 900
    },
    {
      "epoch": 18.52,
      "learning_rate": 3.148148148148148e-05,
      "loss": 0.8639,
      "step": 1000
    },
    {
      "epoch": 20.37,
      "learning_rate": 2.962962962962963e-05,
      "loss": 0.8091,
      "step": 1100
    },
    {
      "epoch": 22.22,
      "learning_rate": 2.777777777777778e-05,
      "loss": 0.8038,
      "step": 1200
    },
    {
      "epoch": 24.07,
      "learning_rate": 2.5925925925925925e-05,
      "loss": 0.7915,
      "step": 1300
    },
    {
      "epoch": 25.93,
      "learning_rate": 2.4074074074074074e-05,
      "loss": 0.7648,
      "step": 1400
    },
    {
      "epoch": 27.78,
      "learning_rate": 2.2222222222222223e-05,
      "loss": 0.7557,
      "step": 1500
    },
    {
      "epoch": 29.63,
      "learning_rate": 2.037037037037037e-05,
      "loss": 0.7004,
      "step": 1600
    },
    {
      "epoch": 31.48,
      "learning_rate": 1.8518518518518518e-05,
      "loss": 0.6927,
      "step": 1700
    },
    {
      "epoch": 33.33,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 0.6847,
      "step": 1800
    },
    {
      "epoch": 35.19,
      "learning_rate": 1.4814814814814815e-05,
      "loss": 0.6828,
      "step": 1900
    },
    {
      "epoch": 37.04,
      "learning_rate": 1.2962962962962962e-05,
      "loss": 0.6526,
      "step": 2000
    },
    {
      "epoch": 38.89,
      "learning_rate": 1.1111111111111112e-05,
      "loss": 0.6612,
      "step": 2100
    },
    {
      "epoch": 40.74,
      "learning_rate": 9.259259259259259e-06,
      "loss": 0.6528,
      "step": 2200
    },
    {
      "epoch": 42.59,
      "learning_rate": 7.4074074074074075e-06,
      "loss": 0.6264,
      "step": 2300
    },
    {
      "epoch": 44.44,
      "learning_rate": 5.555555555555556e-06,
      "loss": 0.6403,
      "step": 2400
    },
    {
      "epoch": 46.3,
      "learning_rate": 3.7037037037037037e-06,
      "loss": 0.6413,
      "step": 2500
    },
    {
      "epoch": 48.15,
      "learning_rate": 1.8518518518518519e-06,
      "loss": 0.6121,
      "step": 2600
    },
    {
      "epoch": 50.0,
      "learning_rate": 0.0,
      "loss": 0.6161,
      "step": 2700
    },
    {
      "epoch": 50.0,
      "step": 2700,
      "total_flos": 7.4663889076224e+17,
      "train_loss": 0.9338362375895183,
      "train_runtime": 1300.5624,
      "train_samples_per_second": 32.678,
      "train_steps_per_second": 2.076
    }
  ],
  "max_steps": 2700,
  "num_train_epochs": 50,
  "total_flos": 7.4663889076224e+17,
  "trial_name": null,
  "trial_params": null
}