HW1 / run-3 / checkpoint-2675 / trainer_state.json
Eva1209's picture
Training in progress, epoch 5
c5bd888 verified
raw
history blame
2.75 kB
{
"best_metric": 0.47042279469851556,
"best_model_checkpoint": "distilbert-base-uncased-finetuned-cola/run-3/checkpoint-2675",
"epoch": 5.0,
"eval_steps": 500,
"global_step": 2675,
"is_hyper_param_search": true,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.93,
"grad_norm": 5.041173934936523,
"learning_rate": 4.466779044509044e-06,
"loss": 0.5565,
"step": 500
},
{
"epoch": 1.0,
"eval_loss": 0.5430340766906738,
"eval_matthews_correlation": 0.3453182575293756,
"eval_runtime": 0.8193,
"eval_samples_per_second": 1273.053,
"eval_steps_per_second": 80.557,
"step": 535
},
{
"epoch": 1.87,
"grad_norm": 9.476218223571777,
"learning_rate": 3.4399332871506427e-06,
"loss": 0.4477,
"step": 1000
},
{
"epoch": 2.0,
"eval_loss": 0.5347909927368164,
"eval_matthews_correlation": 0.4329619258860067,
"eval_runtime": 1.3398,
"eval_samples_per_second": 778.476,
"eval_steps_per_second": 49.261,
"step": 1070
},
{
"epoch": 2.8,
"grad_norm": 13.160967826843262,
"learning_rate": 2.413087529792242e-06,
"loss": 0.3814,
"step": 1500
},
{
"epoch": 3.0,
"eval_loss": 0.518277108669281,
"eval_matthews_correlation": 0.4491418956579208,
"eval_runtime": 0.952,
"eval_samples_per_second": 1095.631,
"eval_steps_per_second": 69.33,
"step": 1605
},
{
"epoch": 3.74,
"grad_norm": 10.544774055480957,
"learning_rate": 1.386241772433841e-06,
"loss": 0.3476,
"step": 2000
},
{
"epoch": 4.0,
"eval_loss": 0.5457636713981628,
"eval_matthews_correlation": 0.4587117762521935,
"eval_runtime": 2.1058,
"eval_samples_per_second": 495.294,
"eval_steps_per_second": 31.342,
"step": 2140
},
{
"epoch": 4.67,
"grad_norm": 8.001981735229492,
"learning_rate": 3.5939601507544025e-07,
"loss": 0.3086,
"step": 2500
},
{
"epoch": 5.0,
"eval_loss": 0.5409175157546997,
"eval_matthews_correlation": 0.47042279469851556,
"eval_runtime": 0.7084,
"eval_samples_per_second": 1472.267,
"eval_steps_per_second": 93.164,
"step": 2675
}
],
"logging_steps": 500,
"max_steps": 2675,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"total_flos": 214118543854920.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": {
"learning_rate": 5.493624801867445e-06,
"num_train_epochs": 5,
"per_device_train_batch_size": 16,
"seed": 31
}
}