{
  "best_metric": 1.0448113679885864,
  "best_model_checkpoint": "th_cl_5epochs_lora_pos_neg/checkpoint-96",
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 160,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.3125,
      "grad_norm": 49.992950439453125,
      "learning_rate": 9.375e-05,
      "loss": 1.0996,
      "step": 10
    },
    {
      "epoch": 0.625,
      "grad_norm": 29.154659271240234,
      "learning_rate": 8.75e-05,
      "loss": 1.1576,
      "step": 20
    },
    {
      "epoch": 0.9375,
      "grad_norm": 39.784446716308594,
      "learning_rate": 8.125000000000001e-05,
      "loss": 1.1552,
      "step": 30
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.5111111111111111,
      "eval_balanced_accuracy": 0.5118577075098814,
      "eval_loss": 1.2024661302566528,
      "eval_runtime": 84.971,
      "eval_samples_per_second": 0.53,
      "eval_steps_per_second": 0.071,
      "step": 32
    },
    {
      "epoch": 1.25,
      "grad_norm": 44.496421813964844,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.7388,
      "step": 40
    },
    {
      "epoch": 1.5625,
      "grad_norm": 39.3575325012207,
      "learning_rate": 6.875e-05,
      "loss": 0.5455,
      "step": 50
    },
    {
      "epoch": 1.875,
      "grad_norm": 24.707012176513672,
      "learning_rate": 6.25e-05,
      "loss": 0.7688,
      "step": 60
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.4888888888888889,
      "eval_balanced_accuracy": 0.4939271255060729,
      "eval_loss": 1.1111503839492798,
      "eval_runtime": 84.9531,
      "eval_samples_per_second": 0.53,
      "eval_steps_per_second": 0.071,
      "step": 64
    },
    {
      "epoch": 2.1875,
      "grad_norm": 10.749614715576172,
      "learning_rate": 5.6250000000000005e-05,
      "loss": 0.4859,
      "step": 70
    },
    {
      "epoch": 2.5,
      "grad_norm": 24.55562973022461,
      "learning_rate": 5e-05,
      "loss": 0.41,
      "step": 80
    },
    {
      "epoch": 2.8125,
      "grad_norm": 38.890281677246094,
      "learning_rate": 4.375e-05,
      "loss": 0.4098,
      "step": 90
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.4888888888888889,
      "eval_balanced_accuracy": 0.48814229249011853,
      "eval_loss": 1.0448113679885864,
      "eval_runtime": 85.1798,
      "eval_samples_per_second": 0.528,
      "eval_steps_per_second": 0.07,
      "step": 96
    },
    {
      "epoch": 3.125,
      "grad_norm": 11.055780410766602,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 0.341,
      "step": 100
    },
    {
      "epoch": 3.4375,
      "grad_norm": 18.11351203918457,
      "learning_rate": 3.125e-05,
      "loss": 0.2338,
      "step": 110
    },
    {
      "epoch": 3.75,
      "grad_norm": 10.180174827575684,
      "learning_rate": 2.5e-05,
      "loss": 0.1714,
      "step": 120
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.4888888888888889,
      "eval_balanced_accuracy": 0.48814229249011853,
      "eval_loss": 1.0591027736663818,
      "eval_runtime": 84.9785,
      "eval_samples_per_second": 0.53,
      "eval_steps_per_second": 0.071,
      "step": 128
    },
    {
      "epoch": 4.0625,
      "grad_norm": 6.068151473999023,
      "learning_rate": 1.8750000000000002e-05,
      "loss": 0.157,
      "step": 130
    },
    {
      "epoch": 4.375,
      "grad_norm": 2.7363812923431396,
      "learning_rate": 1.25e-05,
      "loss": 0.0971,
      "step": 140
    },
    {
      "epoch": 4.6875,
      "grad_norm": 13.663867950439453,
      "learning_rate": 6.25e-06,
      "loss": 0.1185,
      "step": 150
    },
    {
      "epoch": 5.0,
      "grad_norm": 20.521446228027344,
      "learning_rate": 0.0,
      "loss": 0.1392,
      "step": 160
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.4888888888888889,
      "eval_balanced_accuracy": 0.48814229249011853,
      "eval_loss": 1.0568149089813232,
      "eval_runtime": 85.0351,
      "eval_samples_per_second": 0.529,
      "eval_steps_per_second": 0.071,
      "step": 160
    },
    {
      "epoch": 5.0,
      "step": 160,
      "total_flos": 2.696144222158848e+16,
      "train_loss": 0.5018329162150621,
      "train_runtime": 8068.1557,
      "train_samples_per_second": 0.156,
      "train_steps_per_second": 0.02
    }
  ],
  "logging_steps": 10,
  "max_steps": 160,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "total_flos": 2.696144222158848e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}