{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.9850746268656714,
"eval_steps": 500,
"global_step": 525,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05685856432125089,
"grad_norm": 0.7364946007728577,
"learning_rate": 1.8518518518518518e-05,
"loss": 2.6696,
"step": 10
},
{
"epoch": 0.11371712864250177,
"grad_norm": 0.35253632068634033,
"learning_rate": 3.7037037037037037e-05,
"loss": 2.4653,
"step": 20
},
{
"epoch": 0.17057569296375266,
"grad_norm": 0.3261440098285675,
"learning_rate": 4.999552306674344e-05,
"loss": 2.3603,
"step": 30
},
{
"epoch": 0.22743425728500355,
"grad_norm": 0.34980908036231995,
"learning_rate": 4.9915977740145865e-05,
"loss": 2.2758,
"step": 40
},
{
"epoch": 0.28429282160625446,
"grad_norm": 0.31713271141052246,
"learning_rate": 4.973730929382489e-05,
"loss": 2.2152,
"step": 50
},
{
"epoch": 0.3411513859275053,
"grad_norm": 0.4006229639053345,
"learning_rate": 4.946022852363932e-05,
"loss": 2.163,
"step": 60
},
{
"epoch": 0.39800995024875624,
"grad_norm": 0.4309302568435669,
"learning_rate": 4.9085837738743645e-05,
"loss": 2.1219,
"step": 70
},
{
"epoch": 0.4548685145700071,
"grad_norm": 0.41214120388031006,
"learning_rate": 4.8615626376276627e-05,
"loss": 2.0718,
"step": 80
},
{
"epoch": 0.511727078891258,
"grad_norm": 0.4962936341762543,
"learning_rate": 4.8051465075940336e-05,
"loss": 2.0389,
"step": 90
},
{
"epoch": 0.5685856432125089,
"grad_norm": 0.6769933104515076,
"learning_rate": 4.73955982380429e-05,
"loss": 2.0079,
"step": 100
},
{
"epoch": 0.6254442075337597,
"grad_norm": 0.5076603293418884,
"learning_rate": 4.665063509461097e-05,
"loss": 2.0021,
"step": 110
},
{
"epoch": 0.6823027718550106,
"grad_norm": 0.49396389722824097,
"learning_rate": 4.581953932909403e-05,
"loss": 1.97,
"step": 120
},
{
"epoch": 0.7391613361762616,
"grad_norm": 0.5190107226371765,
"learning_rate": 4.49056172859561e-05,
"loss": 1.9661,
"step": 130
},
{
"epoch": 0.7960199004975125,
"grad_norm": 0.5600285530090332,
"learning_rate": 4.391250481706078e-05,
"loss": 1.9294,
"step": 140
},
{
"epoch": 0.8528784648187633,
"grad_norm": 0.5234688520431519,
"learning_rate": 4.284415281717847e-05,
"loss": 1.9263,
"step": 150
},
{
"epoch": 0.9097370291400142,
"grad_norm": 0.4960320293903351,
"learning_rate": 4.1704811506159904e-05,
"loss": 1.9453,
"step": 160
},
{
"epoch": 0.9665955934612651,
"grad_norm": 0.5181735754013062,
"learning_rate": 4.0499013520305975e-05,
"loss": 1.8972,
"step": 170
},
{
"epoch": 1.023454157782516,
"grad_norm": 0.6533639430999756,
"learning_rate": 3.9231555880201655e-05,
"loss": 1.9302,
"step": 180
},
{
"epoch": 1.080312722103767,
"grad_norm": 0.5052580237388611,
"learning_rate": 3.7907480906751014e-05,
"loss": 1.8674,
"step": 190
},
{
"epoch": 1.1371712864250179,
"grad_norm": 0.4947746992111206,
"learning_rate": 3.6532056161335147e-05,
"loss": 1.8639,
"step": 200
},
{
"epoch": 1.1940298507462686,
"grad_norm": 0.5192781686782837,
"learning_rate": 3.511075348989692e-05,
"loss": 1.86,
"step": 210
},
{
"epoch": 1.2508884150675195,
"grad_norm": 0.5064213275909424,
"learning_rate": 3.364922725432147e-05,
"loss": 1.832,
"step": 220
},
{
"epoch": 1.3077469793887704,
"grad_norm": 0.520462691783905,
"learning_rate": 3.2153291837714214e-05,
"loss": 1.8467,
"step": 230
},
{
"epoch": 1.3646055437100213,
"grad_norm": 0.48832887411117554,
"learning_rate": 3.0628898513067353e-05,
"loss": 1.838,
"step": 240
},
{
"epoch": 1.4214641080312722,
"grad_norm": 0.6568878293037415,
"learning_rate": 2.908211176733781e-05,
"loss": 1.8377,
"step": 250
},
{
"epoch": 1.4783226723525231,
"grad_norm": 0.6199402809143066,
"learning_rate": 2.751908517512671e-05,
"loss": 1.8426,
"step": 260
},
{
"epoch": 1.535181236673774,
"grad_norm": 0.5781152844429016,
"learning_rate": 2.5946036917941762e-05,
"loss": 1.8193,
"step": 270
},
{
"epoch": 1.5920398009950247,
"grad_norm": 0.5179237127304077,
"learning_rate": 2.4369225046434392e-05,
"loss": 1.8371,
"step": 280
},
{
"epoch": 1.6488983653162759,
"grad_norm": 0.6433179974555969,
"learning_rate": 2.279492258402559e-05,
"loss": 1.8118,
"step": 290
},
{
"epoch": 1.7057569296375266,
"grad_norm": 0.522022008895874,
"learning_rate": 2.1229392570965657e-05,
"loss": 1.8143,
"step": 300
},
{
"epoch": 1.7626154939587777,
"grad_norm": 0.6466122269630432,
"learning_rate": 1.9678863148109977e-05,
"loss": 1.8214,
"step": 310
},
{
"epoch": 1.8194740582800284,
"grad_norm": 0.5710527896881104,
"learning_rate": 1.814950277953485e-05,
"loss": 1.8147,
"step": 320
},
{
"epoch": 1.8763326226012793,
"grad_norm": 0.582222580909729,
"learning_rate": 1.6647395712565256e-05,
"loss": 1.7966,
"step": 330
},
{
"epoch": 1.9331911869225302,
"grad_norm": 0.5713987350463867,
"learning_rate": 1.5178517772841655e-05,
"loss": 1.7946,
"step": 340
},
{
"epoch": 1.9900497512437811,
"grad_norm": 0.5466066002845764,
"learning_rate": 1.3748712590720304e-05,
"loss": 1.7996,
"step": 350
},
{
"epoch": 2.046908315565032,
"grad_norm": 0.5649642944335938,
"learning_rate": 1.2363668353585487e-05,
"loss": 1.7954,
"step": 360
},
{
"epoch": 2.1037668798862827,
"grad_norm": 0.5820536613464355,
"learning_rate": 1.1028895176559767e-05,
"loss": 1.7484,
"step": 370
},
{
"epoch": 2.160625444207534,
"grad_norm": 0.5737554430961609,
"learning_rate": 9.749703181638356e-06,
"loss": 1.7479,
"step": 380
},
{
"epoch": 2.2174840085287846,
"grad_norm": 0.5488157272338867,
"learning_rate": 8.531181372455161e-06,
"loss": 1.767,
"step": 390
},
{
"epoch": 2.2743425728500357,
"grad_norm": 0.5780847668647766,
"learning_rate": 7.378177388723001e-06,
"loss": 1.7762,
"step": 400
},
{
"epoch": 2.3312011371712864,
"grad_norm": 0.5912370085716248,
"learning_rate": 6.2952782208908625e-06,
"loss": 1.7534,
"step": 410
},
{
"epoch": 2.388059701492537,
"grad_norm": 0.6620157957077026,
"learning_rate": 5.2867919617408556e-06,
"loss": 1.7494,
"step": 420
},
{
"epoch": 2.4449182658137882,
"grad_norm": 0.5841735005378723,
"learning_rate": 4.35673066752249e-06,
"loss": 1.7481,
"step": 430
},
{
"epoch": 2.501776830135039,
"grad_norm": 0.5628016591072083,
"learning_rate": 3.508794396807749e-06,
"loss": 1.7517,
"step": 440
},
{
"epoch": 2.55863539445629,
"grad_norm": 0.5885946154594421,
"learning_rate": 2.7463564905650858e-06,
"loss": 1.7676,
"step": 450
},
{
"epoch": 2.6154939587775408,
"grad_norm": 0.5894684791564941,
"learning_rate": 2.0724501520125506e-06,
"loss": 1.7411,
"step": 460
},
{
"epoch": 2.672352523098792,
"grad_norm": 0.6206966042518616,
"learning_rate": 1.4897563796393593e-06,
"loss": 1.7338,
"step": 470
},
{
"epoch": 2.7292110874200426,
"grad_norm": 0.5665762424468994,
"learning_rate": 1.0005933014019308e-06,
"loss": 1.7395,
"step": 480
},
{
"epoch": 2.7860696517412933,
"grad_norm": 0.5661548972129822,
"learning_rate": 6.069069525261011e-07,
"loss": 1.7523,
"step": 490
},
{
"epoch": 2.8429282160625444,
"grad_norm": 0.5656740665435791,
"learning_rate": 3.10263533604116e-07,
"loss": 1.7606,
"step": 500
},
{
"epoch": 2.8429282160625444,
"eval_loss": 1.8154741525650024,
"eval_runtime": 207.4227,
"eval_samples_per_second": 48.211,
"eval_steps_per_second": 0.757,
"step": 500
},
{
"epoch": 2.8997867803837956,
"grad_norm": 0.565244734287262,
"learning_rate": 1.1184317978602809e-07,
"loss": 1.7441,
"step": 510
},
{
"epoch": 2.9566453447050463,
"grad_norm": 0.5718834400177002,
"learning_rate": 1.2435265853436017e-08,
"loss": 1.7484,
"step": 520
},
{
"epoch": 2.9850746268656714,
"step": 525,
"total_flos": 4.318254146969928e+17,
"train_loss": 1.9003193955194382,
"train_runtime": 24288.4682,
"train_samples_per_second": 11.116,
"train_steps_per_second": 0.022
}
],
"logging_steps": 10,
"max_steps": 525,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4.318254146969928e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}