{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.9694859717386852,
"global_step": 14500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.1,
"learning_rate": 0.00025904995904995904,
"loss": 5.6935,
"step": 500
},
{
"epoch": 0.2,
"learning_rate": 0.00021809991809991808,
"loss": 5.9538,
"step": 1000
},
{
"epoch": 0.31,
"learning_rate": 0.00017714987714987715,
"loss": 5.9526,
"step": 1500
},
{
"epoch": 0.41,
"learning_rate": 0.0001361998361998362,
"loss": 5.9532,
"step": 2000
},
{
"epoch": 0.51,
"learning_rate": 9.524979524979524e-05,
"loss": 5.9517,
"step": 2500
},
{
"epoch": 0.61,
"learning_rate": 5.429975429975429e-05,
"loss": 5.9514,
"step": 3000
},
{
"epoch": 0.72,
"learning_rate": 1.3349713349713349e-05,
"loss": 5.9512,
"step": 3500
},
{
"epoch": 0.82,
"learning_rate": 0.0,
"loss": 5.9513,
"step": 4000
},
{
"epoch": 0.92,
"learning_rate": 0.0,
"loss": 5.9513,
"step": 4500
},
{
"epoch": 1.02,
"learning_rate": 0.0,
"loss": 5.9516,
"step": 5000
},
{
"epoch": 1.13,
"learning_rate": 0.0,
"loss": 5.9515,
"step": 5500
},
{
"epoch": 1.23,
"learning_rate": 0.0,
"loss": 5.9514,
"step": 6000
},
{
"epoch": 1.33,
"learning_rate": 0.0,
"loss": 5.9518,
"step": 6500
},
{
"epoch": 1.43,
"learning_rate": 0.0,
"loss": 5.9514,
"step": 7000
},
{
"epoch": 1.54,
"learning_rate": 0.0,
"loss": 5.9513,
"step": 7500
},
{
"epoch": 1.64,
"learning_rate": 0.0,
"loss": 5.9514,
"step": 8000
},
{
"epoch": 1.74,
"learning_rate": 0.0,
"loss": 5.9516,
"step": 8500
},
{
"epoch": 1.84,
"learning_rate": 0.0,
"loss": 5.9515,
"step": 9000
},
{
"epoch": 1.95,
"learning_rate": 0.0,
"loss": 5.9512,
"step": 9500
},
{
"epoch": 2.05,
"learning_rate": 0.0,
"loss": 5.9512,
"step": 10000
},
{
"epoch": 2.15,
"learning_rate": 0.0,
"loss": 5.9517,
"step": 10500
},
{
"epoch": 2.25,
"learning_rate": 0.0,
"loss": 5.9515,
"step": 11000
},
{
"epoch": 2.36,
"learning_rate": 0.0,
"loss": 5.9518,
"step": 11500
},
{
"epoch": 2.46,
"learning_rate": 0.0,
"loss": 5.9516,
"step": 12000
},
{
"epoch": 2.56,
"learning_rate": 0.0,
"loss": 5.9517,
"step": 12500
},
{
"epoch": 2.66,
"learning_rate": 0.0,
"loss": 5.9511,
"step": 13000
},
{
"epoch": 2.76,
"learning_rate": 0.0,
"loss": 5.9508,
"step": 13500
},
{
"epoch": 2.87,
"learning_rate": 0.0,
"loss": 5.9514,
"step": 14000
},
{
"epoch": 2.97,
"learning_rate": 0.0,
"loss": 5.9511,
"step": 14500
},
{
"epoch": 2.97,
"step": 14500,
"total_flos": 9.092500022196634e+16,
"train_loss": 0.0,
"train_runtime": 32.9708,
"train_samples_per_second": 14216.232,
"train_steps_per_second": 111.098
}
],
"max_steps": 3663,
"num_train_epochs": 3,
"total_flos": 9.092500022196634e+16,
"trial_name": null,
"trial_params": null
}