{
  "best_metric": 1.3581866025924683,
  "best_model_checkpoint": "flan-t5-small-prompt/checkpoint-11702",
  "epoch": 2.0,
  "global_step": 11702,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09,
      "learning_rate": 4.91454452230388e-05,
      "loss": 2.2473,
      "step": 500
    },
    {
      "epoch": 0.17,
      "learning_rate": 4.82908904460776e-05,
      "loss": 1.9761,
      "step": 1000
    },
    {
      "epoch": 0.26,
      "learning_rate": 4.7436335669116395e-05,
      "loss": 1.8802,
      "step": 1500
    },
    {
      "epoch": 0.34,
      "learning_rate": 4.658178089215519e-05,
      "loss": 1.8477,
      "step": 2000
    },
    {
      "epoch": 0.43,
      "learning_rate": 4.572722611519399e-05,
      "loss": 1.8029,
      "step": 2500
    },
    {
      "epoch": 0.51,
      "learning_rate": 4.487267133823278e-05,
      "loss": 1.7633,
      "step": 3000
    },
    {
      "epoch": 0.6,
      "learning_rate": 4.401811656127158e-05,
      "loss": 1.7704,
      "step": 3500
    },
    {
      "epoch": 0.68,
      "learning_rate": 4.3163561784310376e-05,
      "loss": 1.7068,
      "step": 4000
    },
    {
      "epoch": 0.77,
      "learning_rate": 4.230900700734917e-05,
      "loss": 1.732,
      "step": 4500
    },
    {
      "epoch": 0.85,
      "learning_rate": 4.145445223038797e-05,
      "loss": 1.7168,
      "step": 5000
    },
    {
      "epoch": 0.94,
      "learning_rate": 4.059989745342677e-05,
      "loss": 1.6812,
      "step": 5500
    },
    {
      "epoch": 1.0,
      "eval_gen_len": 14.935384615384615,
      "eval_loss": 1.4240827560424805,
      "eval_rouge1": 52.0316,
      "eval_rouge2": 33.7896,
      "eval_rougeL": 49.2826,
      "eval_rougeLsum": 49.8238,
      "eval_runtime": 375.6869,
      "eval_samples_per_second": 13.841,
      "eval_steps_per_second": 1.73,
      "step": 5851
    },
    {
      "epoch": 1.03,
      "learning_rate": 3.9745342676465566e-05,
      "loss": 1.6711,
      "step": 6000
    },
    {
      "epoch": 1.11,
      "learning_rate": 3.889078789950436e-05,
      "loss": 1.651,
      "step": 6500
    },
    {
      "epoch": 1.2,
      "learning_rate": 3.803623312254316e-05,
      "loss": 1.6327,
      "step": 7000
    },
    {
      "epoch": 1.28,
      "learning_rate": 3.718167834558196e-05,
      "loss": 1.6171,
      "step": 7500
    },
    {
      "epoch": 1.37,
      "learning_rate": 3.6327123568620756e-05,
      "loss": 1.6142,
      "step": 8000
    },
    {
      "epoch": 1.45,
      "learning_rate": 3.5472568791659546e-05,
      "loss": 1.5843,
      "step": 8500
    },
    {
      "epoch": 1.54,
      "learning_rate": 3.4618014014698344e-05,
      "loss": 1.6075,
      "step": 9000
    },
    {
      "epoch": 1.62,
      "learning_rate": 3.376345923773714e-05,
      "loss": 1.5917,
      "step": 9500
    },
    {
      "epoch": 1.71,
      "learning_rate": 3.290890446077594e-05,
      "loss": 1.5797,
      "step": 10000
    },
    {
      "epoch": 1.79,
      "learning_rate": 3.205434968381473e-05,
      "loss": 1.6037,
      "step": 10500
    },
    {
      "epoch": 1.88,
      "learning_rate": 3.119979490685353e-05,
      "loss": 1.5926,
      "step": 11000
    },
    {
      "epoch": 1.97,
      "learning_rate": 3.0345240129892328e-05,
      "loss": 1.5684,
      "step": 11500
    },
    {
      "epoch": 2.0,
      "eval_gen_len": 14.967115384615385,
      "eval_loss": 1.3581866025924683,
      "eval_rouge1": 53.5315,
      "eval_rouge2": 35.1883,
      "eval_rougeL": 50.7765,
      "eval_rougeLsum": 51.3334,
      "eval_runtime": 392.0061,
      "eval_samples_per_second": 13.265,
      "eval_steps_per_second": 1.658,
      "step": 11702
    }
  ],
  "max_steps": 29255,
  "num_train_epochs": 5,
  "total_flos": 1.7400094802313216e+16,
  "trial_name": null,
  "trial_params": null
}