{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 693,
  "global_step": 555,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0,
      "eval_f1": 0.6193506281612009,
      "eval_fn": 512,
      "eval_fp": 1821,
      "eval_loss": 0.736859142780304,
      "eval_precision": 0.5103522452272116,
      "eval_recall": 0.787551867219917,
      "eval_runtime": 69.9291,
      "eval_samples_per_second": 66.825,
      "eval_steps_per_second": 8.366,
      "eval_tn": 442,
      "eval_tp": 1898,
      "step": 0
    },
    {
      "epoch": 0.018026137899954935,
      "grad_norm": 32.56014633178711,
      "learning_rate": 4.981949458483755e-05,
      "loss": 0.8382,
      "step": 10
    },
    {
      "epoch": 0.03605227579990987,
      "grad_norm": 27.728681564331055,
      "learning_rate": 4.963898916967509e-05,
      "loss": 0.5565,
      "step": 20
    },
    {
      "epoch": 0.054078413699864804,
      "grad_norm": 26.70035171508789,
      "learning_rate": 4.945848375451264e-05,
      "loss": 0.5545,
      "step": 30
    },
    {
      "epoch": 0.07210455159981974,
      "grad_norm": 23.187734603881836,
      "learning_rate": 4.927797833935018e-05,
      "loss": 0.6027,
      "step": 40
    },
    {
      "epoch": 0.09013068949977468,
      "grad_norm": 35.976619720458984,
      "learning_rate": 4.909747292418773e-05,
      "loss": 0.5741,
      "step": 50
    },
    {
      "epoch": 0.10815682739972961,
      "grad_norm": 27.495447158813477,
      "learning_rate": 4.891696750902527e-05,
      "loss": 0.4941,
      "step": 60
    },
    {
      "epoch": 0.12618296529968454,
      "grad_norm": 25.021883010864258,
      "learning_rate": 4.873646209386282e-05,
      "loss": 0.457,
      "step": 70
    },
    {
      "epoch": 0.14420910319963948,
      "grad_norm": 29.016551971435547,
      "learning_rate": 4.855595667870036e-05,
      "loss": 0.4215,
      "step": 80
    },
    {
      "epoch": 0.16223524109959442,
      "grad_norm": 25.652786254882812,
      "learning_rate": 4.837545126353791e-05,
      "loss": 0.4292,
      "step": 90
    },
    {
      "epoch": 0.18026137899954936,
      "grad_norm": 24.254980087280273,
      "learning_rate": 4.819494584837546e-05,
      "loss": 0.4559,
      "step": 100
    },
    {
      "epoch": 0.19828751689950427,
      "grad_norm": 43.745643615722656,
      "learning_rate": 4.8014440433213e-05,
      "loss": 0.4536,
      "step": 110
    },
    {
      "epoch": 0.21631365479945922,
      "grad_norm": 19.265901565551758,
      "learning_rate": 4.783393501805055e-05,
      "loss": 0.4634,
      "step": 120
    },
    {
      "epoch": 0.23433979269941416,
      "grad_norm": 75.13768768310547,
      "learning_rate": 4.765342960288809e-05,
      "loss": 0.4067,
      "step": 130
    },
    {
      "epoch": 0.25236593059936907,
      "grad_norm": 24.337844848632812,
      "learning_rate": 4.747292418772563e-05,
      "loss": 0.4483,
      "step": 140
    },
    {
      "epoch": 0.270392068499324,
      "grad_norm": 27.518775939941406,
      "learning_rate": 4.7292418772563177e-05,
      "loss": 0.4524,
      "step": 150
    },
    {
      "epoch": 0.28841820639927895,
      "grad_norm": 23.281097412109375,
      "learning_rate": 4.711191335740072e-05,
      "loss": 0.4443,
      "step": 160
    },
    {
      "epoch": 0.3064443442992339,
      "grad_norm": 12.933683395385742,
      "learning_rate": 4.693140794223827e-05,
      "loss": 0.472,
      "step": 170
    },
    {
      "epoch": 0.32447048219918884,
      "grad_norm": 16.49010467529297,
      "learning_rate": 4.675090252707581e-05,
      "loss": 0.4549,
      "step": 180
    },
    {
      "epoch": 0.3424966200991438,
      "grad_norm": 19.209016799926758,
      "learning_rate": 4.657039711191336e-05,
      "loss": 0.4628,
      "step": 190
    },
    {
      "epoch": 0.3605227579990987,
      "grad_norm": 19.71845054626465,
      "learning_rate": 4.63898916967509e-05,
      "loss": 0.4281,
      "step": 200
    },
    {
      "epoch": 0.3785488958990536,
      "grad_norm": 19.700483322143555,
      "learning_rate": 4.620938628158845e-05,
      "loss": 0.4427,
      "step": 210
    },
    {
      "epoch": 0.39657503379900855,
      "grad_norm": 19.06282615661621,
      "learning_rate": 4.602888086642599e-05,
      "loss": 0.4214,
      "step": 220
    },
    {
      "epoch": 0.4146011716989635,
      "grad_norm": 14.681564331054688,
      "learning_rate": 4.584837545126354e-05,
      "loss": 0.4279,
      "step": 230
    },
    {
      "epoch": 0.43262730959891843,
      "grad_norm": 28.783143997192383,
      "learning_rate": 4.566787003610109e-05,
      "loss": 0.4475,
      "step": 240
    },
    {
      "epoch": 0.45065344749887337,
      "grad_norm": 14.686984062194824,
      "learning_rate": 4.548736462093863e-05,
      "loss": 0.4232,
      "step": 250
    },
    {
      "epoch": 0.4686795853988283,
      "grad_norm": 15.651535987854004,
      "learning_rate": 4.530685920577618e-05,
      "loss": 0.4026,
      "step": 260
    },
    {
      "epoch": 0.48670572329878325,
      "grad_norm": 17.506153106689453,
      "learning_rate": 4.5126353790613716e-05,
      "loss": 0.416,
      "step": 270
    },
    {
      "epoch": 0.5047318611987381,
      "grad_norm": 23.381561279296875,
      "learning_rate": 4.494584837545127e-05,
      "loss": 0.385,
      "step": 280
    },
    {
      "epoch": 0.5227579990986931,
      "grad_norm": 18.458227157592773,
      "learning_rate": 4.4765342960288806e-05,
      "loss": 0.4352,
      "step": 290
    },
    {
      "epoch": 0.540784136998648,
      "grad_norm": 16.285743713378906,
      "learning_rate": 4.458483754512636e-05,
      "loss": 0.4191,
      "step": 300
    },
    {
      "epoch": 0.558810274898603,
      "grad_norm": 22.14523696899414,
      "learning_rate": 4.44043321299639e-05,
      "loss": 0.4525,
      "step": 310
    },
    {
      "epoch": 0.5768364127985579,
      "grad_norm": 12.94579029083252,
      "learning_rate": 4.422382671480145e-05,
      "loss": 0.4335,
      "step": 320
    },
    {
      "epoch": 0.5948625506985128,
      "grad_norm": 29.218290328979492,
      "learning_rate": 4.404332129963899e-05,
      "loss": 0.4194,
      "step": 330
    },
    {
      "epoch": 0.6128886885984678,
      "grad_norm": 20.067014694213867,
      "learning_rate": 4.386281588447654e-05,
      "loss": 0.4016,
      "step": 340
    },
    {
      "epoch": 0.6309148264984227,
      "grad_norm": 15.315394401550293,
      "learning_rate": 4.368231046931408e-05,
      "loss": 0.4293,
      "step": 350
    },
    {
      "epoch": 0.6489409643983777,
      "grad_norm": 28.87151527404785,
      "learning_rate": 4.350180505415163e-05,
      "loss": 0.3952,
      "step": 360
    },
    {
      "epoch": 0.6669671022983326,
      "grad_norm": 12.91943645477295,
      "learning_rate": 4.332129963898917e-05,
      "loss": 0.4276,
      "step": 370
    },
    {
      "epoch": 0.6849932401982876,
      "grad_norm": 18.917442321777344,
      "learning_rate": 4.314079422382672e-05,
      "loss": 0.3843,
      "step": 380
    },
    {
      "epoch": 0.7030193780982424,
      "grad_norm": 16.344675064086914,
      "learning_rate": 4.296028880866426e-05,
      "loss": 0.4283,
      "step": 390
    },
    {
      "epoch": 0.7210455159981974,
      "grad_norm": 27.420021057128906,
      "learning_rate": 4.277978339350181e-05,
      "loss": 0.4663,
      "step": 400
    },
    {
      "epoch": 0.7390716538981523,
      "grad_norm": 37.13134765625,
      "learning_rate": 4.259927797833935e-05,
      "loss": 0.4338,
      "step": 410
    },
    {
      "epoch": 0.7570977917981072,
      "grad_norm": 23.59578514099121,
      "learning_rate": 4.24187725631769e-05,
      "loss": 0.3714,
      "step": 420
    },
    {
      "epoch": 0.7751239296980622,
      "grad_norm": 30.009662628173828,
      "learning_rate": 4.223826714801444e-05,
      "loss": 0.3775,
      "step": 430
    },
    {
      "epoch": 0.7931500675980171,
      "grad_norm": 13.575807571411133,
      "learning_rate": 4.205776173285199e-05,
      "loss": 0.4104,
      "step": 440
    },
    {
      "epoch": 0.8111762054979721,
      "grad_norm": 18.727609634399414,
      "learning_rate": 4.187725631768953e-05,
      "loss": 0.3804,
      "step": 450
    },
    {
      "epoch": 0.829202343397927,
      "grad_norm": 14.367807388305664,
      "learning_rate": 4.169675090252708e-05,
      "loss": 0.4124,
      "step": 460
    },
    {
      "epoch": 0.847228481297882,
      "grad_norm": 12.945828437805176,
      "learning_rate": 4.151624548736462e-05,
      "loss": 0.4149,
      "step": 470
    },
    {
      "epoch": 0.8652546191978369,
      "grad_norm": 24.02052116394043,
      "learning_rate": 4.1335740072202167e-05,
      "loss": 0.4093,
      "step": 480
    },
    {
      "epoch": 0.8832807570977917,
      "grad_norm": 15.609790802001953,
      "learning_rate": 4.115523465703972e-05,
      "loss": 0.3874,
      "step": 490
    },
    {
      "epoch": 0.9013068949977467,
      "grad_norm": 25.04174041748047,
      "learning_rate": 4.0974729241877256e-05,
      "loss": 0.391,
      "step": 500
    },
    {
      "epoch": 0.9193330328977016,
      "grad_norm": 20.739681243896484,
      "learning_rate": 4.079422382671481e-05,
      "loss": 0.3939,
      "step": 510
    },
    {
      "epoch": 0.9373591707976566,
      "grad_norm": 16.13037872314453,
      "learning_rate": 4.0613718411552346e-05,
      "loss": 0.4004,
      "step": 520
    },
    {
      "epoch": 0.9553853086976115,
      "grad_norm": 21.17761993408203,
      "learning_rate": 4.043321299638989e-05,
      "loss": 0.3978,
      "step": 530
    },
    {
      "epoch": 0.9734114465975665,
      "grad_norm": 27.03806495666504,
      "learning_rate": 4.0252707581227436e-05,
      "loss": 0.4391,
      "step": 540
    },
    {
      "epoch": 0.9914375844975214,
      "grad_norm": 15.054065704345703,
      "learning_rate": 4.007220216606498e-05,
      "loss": 0.4187,
      "step": 550
    }
  ],
  "logging_steps": 10,
  "max_steps": 2770,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.929257128088371e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}