{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 1176,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.13,
      "learning_rate": 1.914965986394558e-05,
      "loss": 0.3064,
      "step": 50
    },
    {
      "epoch": 0.26,
      "learning_rate": 1.8299319727891158e-05,
      "loss": 0.0263,
      "step": 100
    },
    {
      "epoch": 0.38,
      "learning_rate": 1.7448979591836738e-05,
      "loss": 0.0175,
      "step": 150
    },
    {
      "epoch": 0.51,
      "learning_rate": 1.6598639455782314e-05,
      "loss": 0.0198,
      "step": 200
    },
    {
      "epoch": 0.64,
      "learning_rate": 1.5748299319727894e-05,
      "loss": 0.0262,
      "step": 250
    },
    {
      "epoch": 0.77,
      "learning_rate": 1.4897959183673472e-05,
      "loss": 0.0306,
      "step": 300
    },
    {
      "epoch": 0.89,
      "learning_rate": 1.4047619047619048e-05,
      "loss": 0.0254,
      "step": 350
    },
    {
      "epoch": 1.02,
      "learning_rate": 1.3197278911564626e-05,
      "loss": 0.0268,
      "step": 400
    },
    {
      "epoch": 1.15,
      "learning_rate": 1.2346938775510204e-05,
      "loss": 0.0018,
      "step": 450
    },
    {
      "epoch": 1.28,
      "learning_rate": 1.1496598639455783e-05,
      "loss": 0.0219,
      "step": 500
    },
    {
      "epoch": 1.28,
      "eval_accuracy": 0.9939432578897035,
      "eval_loss": 0.05591470003128052,
      "eval_runtime": 16.3109,
      "eval_samples_per_second": 192.326,
      "eval_steps_per_second": 12.078,
      "step": 500
    },
    {
      "epoch": 1.4,
      "learning_rate": 1.064625850340136e-05,
      "loss": 0.0174,
      "step": 550
    },
    {
      "epoch": 1.53,
      "learning_rate": 9.795918367346939e-06,
      "loss": 0.0041,
      "step": 600
    },
    {
      "epoch": 1.66,
      "learning_rate": 8.945578231292518e-06,
      "loss": 0.0101,
      "step": 650
    },
    {
      "epoch": 1.79,
      "learning_rate": 8.095238095238097e-06,
      "loss": 0.0102,
      "step": 700
    },
    {
      "epoch": 1.91,
      "learning_rate": 7.244897959183675e-06,
      "loss": 0.0003,
      "step": 750
    },
    {
      "epoch": 2.04,
      "learning_rate": 6.394557823129253e-06,
      "loss": 0.0055,
      "step": 800
    },
    {
      "epoch": 2.17,
      "learning_rate": 5.54421768707483e-06,
      "loss": 0.0004,
      "step": 850
    },
    {
      "epoch": 2.3,
      "learning_rate": 4.693877551020409e-06,
      "loss": 0.0016,
      "step": 900
    },
    {
      "epoch": 2.42,
      "learning_rate": 3.843537414965986e-06,
      "loss": 0.0001,
      "step": 950
    },
    {
      "epoch": 2.55,
      "learning_rate": 2.993197278911565e-06,
      "loss": 0.003,
      "step": 1000
    },
    {
      "epoch": 2.55,
      "eval_accuracy": 0.9987248963978324,
      "eval_loss": 0.005054248962551355,
      "eval_runtime": 16.2892,
      "eval_samples_per_second": 192.581,
      "eval_steps_per_second": 12.094,
      "step": 1000
    },
    {
      "epoch": 2.68,
      "learning_rate": 2.1428571428571427e-06,
      "loss": 0.0001,
      "step": 1050
    },
    {
      "epoch": 2.81,
      "learning_rate": 1.2925170068027212e-06,
      "loss": 0.0053,
      "step": 1100
    },
    {
      "epoch": 2.93,
      "learning_rate": 4.421768707482994e-07,
      "loss": 0.0001,
      "step": 1150
    },
    {
      "epoch": 3.0,
      "step": 1176,
      "total_flos": 7490645877017184.0,
      "train_loss": 0.023851880903884792,
      "train_runtime": 758.8839,
      "train_samples_per_second": 49.517,
      "train_steps_per_second": 1.55
    }
  ],
  "logging_steps": 50,
  "max_steps": 1176,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 7490645877017184.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}