{
  "best_metric": 0.5238644265545664,
  "best_model_checkpoint": "distilbert-base-uncased-finetuned-cola/run-0/checkpoint-6414",
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 6414,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.23,
      "grad_norm": 7.291004657745361,
      "learning_rate": 1.351268050159641e-05,
      "loss": 0.5812,
      "step": 500
    },
    {
      "epoch": 0.47,
      "grad_norm": 14.129287719726562,
      "learning_rate": 1.284964416687627e-05,
      "loss": 0.5391,
      "step": 1000
    },
    {
      "epoch": 0.7,
      "grad_norm": 4.682578086853027,
      "learning_rate": 1.2186607832156134e-05,
      "loss": 0.5557,
      "step": 1500
    },
    {
      "epoch": 0.94,
      "grad_norm": 1.5674272775650024,
      "learning_rate": 1.1523571497435995e-05,
      "loss": 0.5335,
      "step": 2000
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.5571191906929016,
      "eval_matthews_correlation": 0.429931898675694,
      "eval_runtime": 0.7111,
      "eval_samples_per_second": 1466.739,
      "eval_steps_per_second": 92.814,
      "step": 2138
    },
    {
      "epoch": 1.17,
      "grad_norm": 0.9350309371948242,
      "learning_rate": 1.0860535162715857e-05,
      "loss": 0.4251,
      "step": 2500
    },
    {
      "epoch": 1.4,
      "grad_norm": 0.13398246467113495,
      "learning_rate": 1.0197498827995719e-05,
      "loss": 0.494,
      "step": 3000
    },
    {
      "epoch": 1.64,
      "grad_norm": 35.02979278564453,
      "learning_rate": 9.534462493275582e-06,
      "loss": 0.4725,
      "step": 3500
    },
    {
      "epoch": 1.87,
      "grad_norm": 7.028965950012207,
      "learning_rate": 8.871426158555444e-06,
      "loss": 0.4842,
      "step": 4000
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.9164985418319702,
      "eval_matthews_correlation": 0.48570761109759075,
      "eval_runtime": 0.7887,
      "eval_samples_per_second": 1322.437,
      "eval_steps_per_second": 83.683,
      "step": 4276
    },
    {
      "epoch": 2.1,
      "grad_norm": 0.19327987730503082,
      "learning_rate": 8.208389823835306e-06,
      "loss": 0.3791,
      "step": 4500
    },
    {
      "epoch": 2.34,
      "grad_norm": 0.06137663871049881,
      "learning_rate": 7.545353489115168e-06,
      "loss": 0.2895,
      "step": 5000
    },
    {
      "epoch": 2.57,
      "grad_norm": 0.08592221885919571,
      "learning_rate": 6.88231715439503e-06,
      "loss": 0.302,
      "step": 5500
    },
    {
      "epoch": 2.81,
      "grad_norm": 0.3166104257106781,
      "learning_rate": 6.219280819674893e-06,
      "loss": 0.3472,
      "step": 6000
    },
    {
      "epoch": 3.0,
      "eval_loss": 1.0048580169677734,
      "eval_matthews_correlation": 0.5238644265545664,
      "eval_runtime": 0.7244,
      "eval_samples_per_second": 1439.815,
      "eval_steps_per_second": 91.11,
      "step": 6414
    }
  ],
  "logging_steps": 500,
  "max_steps": 10690,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "total_flos": 97824845378964.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": {
    "learning_rate": 1.4175716836316547e-05,
    "num_train_epochs": 5,
    "per_device_train_batch_size": 4,
    "seed": 20
  }
}