{
  "best_metric": 4.958001136779785,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.01408153207068929,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0002816306414137858,
      "grad_norm": 48.62163543701172,
      "learning_rate": 0.0001,
      "loss": 23.4818,
      "step": 1
    },
    {
      "epoch": 0.0002816306414137858,
      "eval_loss": 7.5018486976623535,
      "eval_runtime": 400.4908,
      "eval_samples_per_second": 3.733,
      "eval_steps_per_second": 1.868,
      "step": 1
    },
    {
      "epoch": 0.0005632612828275716,
      "grad_norm": 50.03125762939453,
      "learning_rate": 0.0002,
      "loss": 25.3606,
      "step": 2
    },
    {
      "epoch": 0.0008448919242413575,
      "grad_norm": 36.288734436035156,
      "learning_rate": 0.00019978589232386035,
      "loss": 24.6152,
      "step": 3
    },
    {
      "epoch": 0.0011265225656551432,
      "grad_norm": 35.620784759521484,
      "learning_rate": 0.00019914448613738106,
      "loss": 21.223,
      "step": 4
    },
    {
      "epoch": 0.0014081532070689292,
      "grad_norm": 38.215370178222656,
      "learning_rate": 0.00019807852804032305,
      "loss": 18.8518,
      "step": 5
    },
    {
      "epoch": 0.001689783848482715,
      "grad_norm": 24.101730346679688,
      "learning_rate": 0.00019659258262890683,
      "loss": 18.2537,
      "step": 6
    },
    {
      "epoch": 0.001971414489896501,
      "grad_norm": 23.53477668762207,
      "learning_rate": 0.0001946930129495106,
      "loss": 18.4047,
      "step": 7
    },
    {
      "epoch": 0.0022530451313102864,
      "grad_norm": 22.56336212158203,
      "learning_rate": 0.0001923879532511287,
      "loss": 17.9984,
      "step": 8
    },
    {
      "epoch": 0.0025346757727240724,
      "grad_norm": 25.97075080871582,
      "learning_rate": 0.00018968727415326884,
      "loss": 18.8505,
      "step": 9
    },
    {
      "epoch": 0.0028163064141378583,
      "grad_norm": 29.8724365234375,
      "learning_rate": 0.00018660254037844388,
      "loss": 19.6848,
      "step": 10
    },
    {
      "epoch": 0.003097937055551644,
      "grad_norm": 73.4782943725586,
      "learning_rate": 0.00018314696123025454,
      "loss": 19.3712,
      "step": 11
    },
    {
      "epoch": 0.00337956769696543,
      "grad_norm": 52.7482795715332,
      "learning_rate": 0.00017933533402912354,
      "loss": 18.7228,
      "step": 12
    },
    {
      "epoch": 0.0036611983383792158,
      "grad_norm": 26.27608299255371,
      "learning_rate": 0.00017518398074789775,
      "loss": 19.0088,
      "step": 13
    },
    {
      "epoch": 0.003942828979793002,
      "grad_norm": 28.202800750732422,
      "learning_rate": 0.00017071067811865476,
      "loss": 18.9887,
      "step": 14
    },
    {
      "epoch": 0.004224459621206787,
      "grad_norm": 50.869102478027344,
      "learning_rate": 0.00016593458151000688,
      "loss": 20.59,
      "step": 15
    },
    {
      "epoch": 0.004506090262620573,
      "grad_norm": 32.090274810791016,
      "learning_rate": 0.00016087614290087208,
      "loss": 18.3551,
      "step": 16
    },
    {
      "epoch": 0.004787720904034359,
      "grad_norm": 29.982337951660156,
      "learning_rate": 0.00015555702330196023,
      "loss": 18.6836,
      "step": 17
    },
    {
      "epoch": 0.005069351545448145,
      "grad_norm": 35.667816162109375,
      "learning_rate": 0.00015000000000000001,
      "loss": 19.2915,
      "step": 18
    },
    {
      "epoch": 0.00535098218686193,
      "grad_norm": 26.442718505859375,
      "learning_rate": 0.00014422886902190014,
      "loss": 20.3658,
      "step": 19
    },
    {
      "epoch": 0.005632612828275717,
      "grad_norm": 33.17983627319336,
      "learning_rate": 0.000138268343236509,
      "loss": 20.531,
      "step": 20
    },
    {
      "epoch": 0.005914243469689502,
      "grad_norm": 24.06730842590332,
      "learning_rate": 0.00013214394653031616,
      "loss": 19.4856,
      "step": 21
    },
    {
      "epoch": 0.006195874111103288,
      "grad_norm": 22.876537322998047,
      "learning_rate": 0.00012588190451025207,
      "loss": 20.0974,
      "step": 22
    },
    {
      "epoch": 0.006477504752517074,
      "grad_norm": 23.80173683166504,
      "learning_rate": 0.00011950903220161285,
      "loss": 19.9339,
      "step": 23
    },
    {
      "epoch": 0.00675913539393086,
      "grad_norm": 23.31247329711914,
      "learning_rate": 0.00011305261922200519,
      "loss": 20.3456,
      "step": 24
    },
    {
      "epoch": 0.007040766035344645,
      "grad_norm": 21.842885971069336,
      "learning_rate": 0.00010654031292301432,
      "loss": 19.6822,
      "step": 25
    },
    {
      "epoch": 0.007040766035344645,
      "eval_loss": 5.01399564743042,
      "eval_runtime": 403.6754,
      "eval_samples_per_second": 3.703,
      "eval_steps_per_second": 1.853,
      "step": 25
    },
    {
      "epoch": 0.0073223966767584315,
      "grad_norm": 22.809677124023438,
      "learning_rate": 0.0001,
      "loss": 19.6608,
      "step": 26
    },
    {
      "epoch": 0.007604027318172217,
      "grad_norm": 26.614421844482422,
      "learning_rate": 9.345968707698569e-05,
      "loss": 20.0502,
      "step": 27
    },
    {
      "epoch": 0.007885657959586003,
      "grad_norm": 22.92184066772461,
      "learning_rate": 8.694738077799488e-05,
      "loss": 20.2952,
      "step": 28
    },
    {
      "epoch": 0.008167288600999788,
      "grad_norm": 25.129104614257812,
      "learning_rate": 8.049096779838719e-05,
      "loss": 20.0264,
      "step": 29
    },
    {
      "epoch": 0.008448919242413575,
      "grad_norm": 25.90605354309082,
      "learning_rate": 7.411809548974792e-05,
      "loss": 20.6772,
      "step": 30
    },
    {
      "epoch": 0.008730549883827361,
      "grad_norm": 23.097158432006836,
      "learning_rate": 6.785605346968386e-05,
      "loss": 19.3772,
      "step": 31
    },
    {
      "epoch": 0.009012180525241146,
      "grad_norm": 19.780305862426758,
      "learning_rate": 6.173165676349103e-05,
      "loss": 19.4421,
      "step": 32
    },
    {
      "epoch": 0.009293811166654932,
      "grad_norm": 23.319713592529297,
      "learning_rate": 5.577113097809989e-05,
      "loss": 19.4254,
      "step": 33
    },
    {
      "epoch": 0.009575441808068718,
      "grad_norm": 25.547657012939453,
      "learning_rate": 5.000000000000002e-05,
      "loss": 20.5563,
      "step": 34
    },
    {
      "epoch": 0.009857072449482503,
      "grad_norm": 22.37945556640625,
      "learning_rate": 4.444297669803981e-05,
      "loss": 21.7697,
      "step": 35
    },
    {
      "epoch": 0.01013870309089629,
      "grad_norm": 25.38311195373535,
      "learning_rate": 3.9123857099127936e-05,
      "loss": 21.9471,
      "step": 36
    },
    {
      "epoch": 0.010420333732310076,
      "grad_norm": 20.930171966552734,
      "learning_rate": 3.406541848999312e-05,
      "loss": 21.8591,
      "step": 37
    },
    {
      "epoch": 0.01070196437372386,
      "grad_norm": 30.409629821777344,
      "learning_rate": 2.9289321881345254e-05,
      "loss": 23.305,
      "step": 38
    },
    {
      "epoch": 0.010983595015137647,
      "grad_norm": 19.549955368041992,
      "learning_rate": 2.4816019252102273e-05,
      "loss": 22.0507,
      "step": 39
    },
    {
      "epoch": 0.011265225656551433,
      "grad_norm": 25.194637298583984,
      "learning_rate": 2.0664665970876496e-05,
      "loss": 22.3263,
      "step": 40
    },
    {
      "epoch": 0.011546856297965218,
      "grad_norm": 24.036544799804688,
      "learning_rate": 1.6853038769745467e-05,
      "loss": 22.2299,
      "step": 41
    },
    {
      "epoch": 0.011828486939379004,
      "grad_norm": 24.677072525024414,
      "learning_rate": 1.339745962155613e-05,
      "loss": 22.3443,
      "step": 42
    },
    {
      "epoch": 0.01211011758079279,
      "grad_norm": 23.8966121673584,
      "learning_rate": 1.0312725846731175e-05,
      "loss": 22.0331,
      "step": 43
    },
    {
      "epoch": 0.012391748222206575,
      "grad_norm": 21.31231117248535,
      "learning_rate": 7.612046748871327e-06,
      "loss": 21.334,
      "step": 44
    },
    {
      "epoch": 0.012673378863620362,
      "grad_norm": 22.37716293334961,
      "learning_rate": 5.306987050489442e-06,
      "loss": 22.0767,
      "step": 45
    },
    {
      "epoch": 0.012955009505034148,
      "grad_norm": 22.888154983520508,
      "learning_rate": 3.40741737109318e-06,
      "loss": 22.6545,
      "step": 46
    },
    {
      "epoch": 0.013236640146447933,
      "grad_norm": 21.42641830444336,
      "learning_rate": 1.921471959676957e-06,
      "loss": 24.4605,
      "step": 47
    },
    {
      "epoch": 0.01351827078786172,
      "grad_norm": 22.642194747924805,
      "learning_rate": 8.555138626189618e-07,
      "loss": 24.1472,
      "step": 48
    },
    {
      "epoch": 0.013799901429275506,
      "grad_norm": 31.944076538085938,
      "learning_rate": 2.141076761396521e-07,
      "loss": 23.9481,
      "step": 49
    },
    {
      "epoch": 0.01408153207068929,
      "grad_norm": 50.73974609375,
      "learning_rate": 0.0,
      "loss": 26.5203,
      "step": 50
    },
    {
      "epoch": 0.01408153207068929,
      "eval_loss": 4.958001136779785,
      "eval_runtime": 403.6789,
      "eval_samples_per_second": 3.703,
      "eval_steps_per_second": 1.853,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 7.03129498681344e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}