{
  "best_metric": 0.32178911566734314,
  "best_model_checkpoint": "/tmp/model/checkpoint-2500",
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 2500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05,
      "learning_rate": 8.266666666666667e-06,
      "loss": 0.6965,
      "step": 125
    },
    {
      "epoch": 0.1,
      "learning_rate": 1.6533333333333333e-05,
      "loss": 0.5176,
      "step": 250
    },
    {
      "epoch": 0.15,
      "learning_rate": 2.486666666666667e-05,
      "loss": 0.4482,
      "step": 375
    },
    {
      "epoch": 0.2,
      "learning_rate": 3.313333333333333e-05,
      "loss": 0.4278,
      "step": 500
    },
    {
      "epoch": 0.25,
      "learning_rate": 4.146666666666667e-05,
      "loss": 0.4267,
      "step": 625
    },
    {
      "epoch": 0.3,
      "learning_rate": 4.973333333333334e-05,
      "loss": 0.4106,
      "step": 750
    },
    {
      "epoch": 0.35,
      "learning_rate": 4.910370370370371e-05,
      "loss": 0.4548,
      "step": 875
    },
    {
      "epoch": 0.4,
      "learning_rate": 4.817777777777778e-05,
      "loss": 0.3838,
      "step": 1000
    },
    {
      "epoch": 0.45,
      "learning_rate": 4.725185185185185e-05,
      "loss": 0.3781,
      "step": 1125
    },
    {
      "epoch": 0.5,
      "learning_rate": 4.6325925925925926e-05,
      "loss": 0.4206,
      "step": 1250
    },
    {
      "epoch": 0.55,
      "learning_rate": 4.5400000000000006e-05,
      "loss": 0.3933,
      "step": 1375
    },
    {
      "epoch": 0.6,
      "learning_rate": 4.447407407407407e-05,
      "loss": 0.4357,
      "step": 1500
    },
    {
      "epoch": 0.65,
      "learning_rate": 4.354814814814815e-05,
      "loss": 0.3704,
      "step": 1625
    },
    {
      "epoch": 0.7,
      "learning_rate": 4.2622222222222224e-05,
      "loss": 0.3859,
      "step": 1750
    },
    {
      "epoch": 0.75,
      "learning_rate": 4.16962962962963e-05,
      "loss": 0.3682,
      "step": 1875
    },
    {
      "epoch": 0.8,
      "learning_rate": 4.077037037037037e-05,
      "loss": 0.3852,
      "step": 2000
    },
    {
      "epoch": 0.85,
      "learning_rate": 3.984444444444444e-05,
      "loss": 0.3641,
      "step": 2125
    },
    {
      "epoch": 0.9,
      "learning_rate": 3.891851851851852e-05,
      "loss": 0.3814,
      "step": 2250
    },
    {
      "epoch": 0.95,
      "learning_rate": 3.7992592592592596e-05,
      "loss": 0.3471,
      "step": 2375
    },
    {
      "epoch": 1.0,
      "learning_rate": 3.706666666666667e-05,
      "loss": 0.3565,
      "step": 2500
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.8518,
      "eval_auc": 0.94403024,
      "eval_f1": 0.8431746031746032,
      "eval_loss": 0.32178911566734314,
      "eval_precision": 0.8952808988764045,
      "eval_recall": 0.7968,
      "eval_runtime": 10.5615,
      "eval_samples_per_second": 473.418,
      "eval_steps_per_second": 29.636,
      "step": 2500
    }
  ],
  "logging_steps": 125,
  "max_steps": 7500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 1315555276800000.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}