{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.47368421052631576,
  "eval_steps": 9,
  "global_step": 99,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.004784688995215311,
      "eval_loss": 10.365926742553711,
      "eval_runtime": 1.8443,
      "eval_samples_per_second": 762.886,
      "eval_steps_per_second": 23.857,
      "step": 1
    },
    {
      "epoch": 0.014354066985645933,
      "grad_norm": 0.1034882590174675,
      "learning_rate": 3e-05,
      "loss": 10.3664,
      "step": 3
    },
    {
      "epoch": 0.028708133971291867,
      "grad_norm": 0.10635757446289062,
      "learning_rate": 6e-05,
      "loss": 10.3653,
      "step": 6
    },
    {
      "epoch": 0.0430622009569378,
      "grad_norm": 0.11460530757904053,
      "learning_rate": 9e-05,
      "loss": 10.3645,
      "step": 9
    },
    {
      "epoch": 0.0430622009569378,
      "eval_loss": 10.362683296203613,
      "eval_runtime": 1.8443,
      "eval_samples_per_second": 762.883,
      "eval_steps_per_second": 23.857,
      "step": 9
    },
    {
      "epoch": 0.05741626794258373,
      "grad_norm": 0.12390146404504776,
      "learning_rate": 9.987820251299122e-05,
      "loss": 10.3624,
      "step": 12
    },
    {
      "epoch": 0.07177033492822966,
      "grad_norm": 0.14381946623325348,
      "learning_rate": 9.924038765061042e-05,
      "loss": 10.3584,
      "step": 15
    },
    {
      "epoch": 0.0861244019138756,
      "grad_norm": 0.16828647255897522,
      "learning_rate": 9.806308479691595e-05,
      "loss": 10.3555,
      "step": 18
    },
    {
      "epoch": 0.0861244019138756,
      "eval_loss": 10.352652549743652,
      "eval_runtime": 1.87,
      "eval_samples_per_second": 752.399,
      "eval_steps_per_second": 23.529,
      "step": 18
    },
    {
      "epoch": 0.10047846889952153,
      "grad_norm": 0.19975420832633972,
      "learning_rate": 9.635919272833938e-05,
      "loss": 10.3511,
      "step": 21
    },
    {
      "epoch": 0.11483253588516747,
      "grad_norm": 0.21127574145793915,
      "learning_rate": 9.414737964294636e-05,
      "loss": 10.3466,
      "step": 24
    },
    {
      "epoch": 0.1291866028708134,
      "grad_norm": 0.21262452006340027,
      "learning_rate": 9.145187862775209e-05,
      "loss": 10.3415,
      "step": 27
    },
    {
      "epoch": 0.1291866028708134,
      "eval_loss": 10.338648796081543,
      "eval_runtime": 1.8449,
      "eval_samples_per_second": 762.643,
      "eval_steps_per_second": 23.85,
      "step": 27
    },
    {
      "epoch": 0.14354066985645933,
      "grad_norm": 0.20146459341049194,
      "learning_rate": 8.83022221559489e-05,
      "loss": 10.3382,
      "step": 30
    },
    {
      "epoch": 0.15789473684210525,
      "grad_norm": 0.1689472794532776,
      "learning_rate": 8.473291852294987e-05,
      "loss": 10.3341,
      "step": 33
    },
    {
      "epoch": 0.1722488038277512,
      "grad_norm": 0.1589594930410385,
      "learning_rate": 8.07830737662829e-05,
      "loss": 10.3297,
      "step": 36
    },
    {
      "epoch": 0.1722488038277512,
      "eval_loss": 10.327372550964355,
      "eval_runtime": 1.8458,
      "eval_samples_per_second": 762.288,
      "eval_steps_per_second": 23.838,
      "step": 36
    },
    {
      "epoch": 0.18660287081339713,
      "grad_norm": 0.14408323168754578,
      "learning_rate": 7.649596321166024e-05,
      "loss": 10.3273,
      "step": 39
    },
    {
      "epoch": 0.20095693779904306,
      "grad_norm": 0.13509996235370636,
      "learning_rate": 7.191855733945387e-05,
      "loss": 10.3244,
      "step": 42
    },
    {
      "epoch": 0.215311004784689,
      "grad_norm": 0.11339321732521057,
      "learning_rate": 6.710100716628344e-05,
      "loss": 10.3244,
      "step": 45
    },
    {
      "epoch": 0.215311004784689,
      "eval_loss": 10.321708679199219,
      "eval_runtime": 1.8345,
      "eval_samples_per_second": 766.961,
      "eval_steps_per_second": 23.985,
      "step": 45
    },
    {
      "epoch": 0.22966507177033493,
      "grad_norm": 0.108404740691185,
      "learning_rate": 6.209609477998338e-05,
      "loss": 10.3215,
      "step": 48
    },
    {
      "epoch": 0.24401913875598086,
      "grad_norm": 0.10472545027732849,
      "learning_rate": 5.695865504800327e-05,
      "loss": 10.3201,
      "step": 51
    },
    {
      "epoch": 0.2583732057416268,
      "grad_norm": 0.10555419325828552,
      "learning_rate": 5.174497483512506e-05,
      "loss": 10.3182,
      "step": 54
    },
    {
      "epoch": 0.2583732057416268,
      "eval_loss": 10.317863464355469,
      "eval_runtime": 1.8688,
      "eval_samples_per_second": 752.905,
      "eval_steps_per_second": 23.545,
      "step": 54
    },
    {
      "epoch": 0.2727272727272727,
      "grad_norm": 0.09443788975477219,
      "learning_rate": 4.6512176312793736e-05,
      "loss": 10.3177,
      "step": 57
    },
    {
      "epoch": 0.28708133971291866,
      "grad_norm": 0.09167303889989853,
      "learning_rate": 4.131759111665349e-05,
      "loss": 10.3162,
      "step": 60
    },
    {
      "epoch": 0.3014354066985646,
      "grad_norm": 0.09214193373918533,
      "learning_rate": 3.6218132209150045e-05,
      "loss": 10.3162,
      "step": 63
    },
    {
      "epoch": 0.3014354066985646,
      "eval_loss": 10.314752578735352,
      "eval_runtime": 1.8438,
      "eval_samples_per_second": 763.116,
      "eval_steps_per_second": 23.864,
      "step": 63
    },
    {
      "epoch": 0.3157894736842105,
      "grad_norm": 0.0888906717300415,
      "learning_rate": 3.12696703292044e-05,
      "loss": 10.3144,
      "step": 66
    },
    {
      "epoch": 0.33014354066985646,
      "grad_norm": 0.08958574384450912,
      "learning_rate": 2.6526421860705473e-05,
      "loss": 10.3132,
      "step": 69
    },
    {
      "epoch": 0.3444976076555024,
      "grad_norm": 0.08907806128263474,
      "learning_rate": 2.2040354826462668e-05,
      "loss": 10.313,
      "step": 72
    },
    {
      "epoch": 0.3444976076555024,
      "eval_loss": 10.312495231628418,
      "eval_runtime": 1.8465,
      "eval_samples_per_second": 761.964,
      "eval_steps_per_second": 23.828,
      "step": 72
    },
    {
      "epoch": 0.3588516746411483,
      "grad_norm": 0.08638429641723633,
      "learning_rate": 1.7860619515673033e-05,
      "loss": 10.3127,
      "step": 75
    },
    {
      "epoch": 0.37320574162679426,
      "grad_norm": 0.08639028668403625,
      "learning_rate": 1.4033009983067452e-05,
      "loss": 10.313,
      "step": 78
    },
    {
      "epoch": 0.3875598086124402,
      "grad_norm": 0.08610237389802933,
      "learning_rate": 1.0599462319663905e-05,
      "loss": 10.3116,
      "step": 81
    },
    {
      "epoch": 0.3875598086124402,
      "eval_loss": 10.311205863952637,
      "eval_runtime": 1.8411,
      "eval_samples_per_second": 764.228,
      "eval_steps_per_second": 23.899,
      "step": 81
    },
    {
      "epoch": 0.4019138755980861,
      "grad_norm": 0.08259936422109604,
      "learning_rate": 7.597595192178702e-06,
      "loss": 10.3114,
      "step": 84
    },
    {
      "epoch": 0.41626794258373206,
      "grad_norm": 0.08639667928218842,
      "learning_rate": 5.060297685041659e-06,
      "loss": 10.3115,
      "step": 87
    },
    {
      "epoch": 0.430622009569378,
      "grad_norm": 0.08296097069978714,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 10.3116,
      "step": 90
    },
    {
      "epoch": 0.430622009569378,
      "eval_loss": 10.310672760009766,
      "eval_runtime": 1.8404,
      "eval_samples_per_second": 764.503,
      "eval_steps_per_second": 23.908,
      "step": 90
    },
    {
      "epoch": 0.4449760765550239,
      "grad_norm": 0.07877939194440842,
      "learning_rate": 1.4852136862001764e-06,
      "loss": 10.3123,
      "step": 93
    },
    {
      "epoch": 0.45933014354066987,
      "grad_norm": 0.08165387064218521,
      "learning_rate": 4.865965629214819e-07,
      "loss": 10.3103,
      "step": 96
    },
    {
      "epoch": 0.47368421052631576,
      "grad_norm": 0.08330517262220383,
      "learning_rate": 3.04586490452119e-08,
      "loss": 10.3102,
      "step": 99
    },
    {
      "epoch": 0.47368421052631576,
      "eval_loss": 10.310574531555176,
      "eval_runtime": 1.8448,
      "eval_samples_per_second": 762.695,
      "eval_steps_per_second": 23.851,
      "step": 99
    }
  ],
  "logging_steps": 3,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 9,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 88348123201536.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}