adapters-gemma-bnb8-QLORA-super_glue-wsc/trainer_state-gemma-bnb8-QLORA-super_glue-wsc-sequence_classification.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 5.925925925925926,
  "eval_steps": 1,
  "global_step": 10,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 267.1435852050781,
      "learning_rate": 2.5e-05,
      "loss": 3.6544,
      "step": 1
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.6212121212121212,
      "eval_loss": 3.440249443054199,
      "eval_runtime": 10.753,
      "eval_samples_per_second": 12.276,
      "eval_steps_per_second": 0.651,
      "step": 1
    },
    {
      "epoch": 1.1851851851851851,
      "grad_norm": 58.776756286621094,
      "learning_rate": 5e-05,
      "loss": 0.7994,
      "step": 2
    },
    {
      "epoch": 1.1851851851851851,
      "eval_accuracy": 0.6212121212121212,
      "eval_loss": 1.934098243713379,
      "eval_runtime": 10.7499,
      "eval_samples_per_second": 12.279,
      "eval_steps_per_second": 0.651,
      "step": 2
    },
    {
      "epoch": 2.0,
      "grad_norm": 195.9375457763672,
      "learning_rate": 4.375e-05,
      "loss": 1.5175,
      "step": 3
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.3787878787878788,
      "eval_loss": 2.742687702178955,
      "eval_runtime": 10.7555,
      "eval_samples_per_second": 12.273,
      "eval_steps_per_second": 0.651,
      "step": 3
    },
    {
      "epoch": 2.3703703703703702,
      "grad_norm": 106.63079071044922,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 0.795,
      "step": 4
    },
    {
      "epoch": 2.3703703703703702,
      "eval_accuracy": 0.3787878787878788,
      "eval_loss": 2.8957302570343018,
      "eval_runtime": 10.7308,
      "eval_samples_per_second": 12.301,
      "eval_steps_per_second": 0.652,
      "step": 4
    },
    {
      "epoch": 3.0,
      "grad_norm": 171.42628479003906,
      "learning_rate": 3.125e-05,
      "loss": 1.3457,
      "step": 5
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.38636363636363635,
      "eval_loss": 1.5985740423202515,
      "eval_runtime": 10.7661,
      "eval_samples_per_second": 12.261,
      "eval_steps_per_second": 0.65,
      "step": 5
    },
    {
      "epoch": 3.5555555555555554,
      "grad_norm": 127.56062316894531,
      "learning_rate": 2.5e-05,
      "loss": 0.6679,
      "step": 6
    },
    {
      "epoch": 3.5555555555555554,
      "eval_accuracy": 0.5606060606060606,
      "eval_loss": 0.7368663549423218,
      "eval_runtime": 10.7584,
      "eval_samples_per_second": 12.27,
      "eval_steps_per_second": 0.651,
      "step": 6
    },
    {
      "epoch": 4.0,
      "grad_norm": 9.635160446166992,
      "learning_rate": 1.8750000000000002e-05,
      "loss": 0.2683,
      "step": 7
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.6212121212121212,
      "eval_loss": 1.010995626449585,
      "eval_runtime": 10.7353,
      "eval_samples_per_second": 12.296,
      "eval_steps_per_second": 0.652,
      "step": 7
    },
    {
      "epoch": 4.7407407407407405,
      "grad_norm": 142.32652282714844,
      "learning_rate": 1.25e-05,
      "loss": 0.7235,
      "step": 8
    },
    {
      "epoch": 4.7407407407407405,
      "eval_accuracy": 0.6212121212121212,
      "eval_loss": 1.204453468322754,
      "eval_runtime": 10.7462,
      "eval_samples_per_second": 12.283,
      "eval_steps_per_second": 0.651,
      "step": 8
    },
    {
      "epoch": 5.0,
      "grad_norm": 59.76286315917969,
      "learning_rate": 6.25e-06,
      "loss": 0.3247,
      "step": 9
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.6212121212121212,
      "eval_loss": 1.1849852800369263,
      "eval_runtime": 10.7387,
      "eval_samples_per_second": 12.292,
      "eval_steps_per_second": 0.652,
      "step": 9
    },
    {
      "epoch": 5.925925925925926,
      "grad_norm": 205.0324249267578,
      "learning_rate": 0.0,
      "loss": 1.0798,
      "step": 10
    },
    {
      "epoch": 5.925925925925926,
      "eval_accuracy": 0.6212121212121212,
      "eval_loss": 1.127427577972412,
      "eval_runtime": 10.7462,
      "eval_samples_per_second": 12.283,
      "eval_steps_per_second": 0.651,
      "step": 10
    },
    {
      "epoch": 5.925925925925926,
      "step": 10,
      "total_flos": 1.4890673025245184e+16,
      "train_loss": 1.1176074355840684,
      "train_runtime": 1045.0059,
      "train_samples_per_second": 5.033,
      "train_steps_per_second": 0.01
    }
  ],
  "logging_steps": 1,
  "max_steps": 10,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "total_flos": 1.4890673025245184e+16,
  "train_batch_size": 20,
  "trial_name": null,
  "trial_params": null
}
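
The "log_history" list above interleaves training entries (loss, grad_norm, learning_rate) with evaluation entries (eval_loss, eval_accuracy) logged at the same step. A minimal Python sketch for reading the metrics back out, assuming the file is saved locally under the hypothetical name trainer_state.json:

# Pair each training log with the evaluation log from the same step
# and print loss / eval_loss / eval_accuracy per logged step.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

train_logs = {e["step"]: e for e in state["log_history"] if "loss" in e}
eval_logs = {e["step"]: e for e in state["log_history"] if "eval_loss" in e}

for step in sorted(train_logs):
    t = train_logs[step]
    v = eval_logs.get(step, {})
    print(f"step {step:2d}  epoch {t['epoch']:.2f}  "
          f"loss {t['loss']:.4f}  "
          f"eval_loss {v.get('eval_loss', float('nan')):.4f}  "
          f"eval_accuracy {v.get('eval_accuracy', float('nan')):.4f}")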