{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 392,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.002551020408163265,
"grad_norm": 14000.175878727785,
"learning_rate": 7.499999999999999e-06,
"loss": 48.0695,
"step": 1
},
{
"epoch": 0.012755102040816327,
"grad_norm": 3007.3104608903254,
"learning_rate": 3.75e-05,
"loss": 32.7037,
"step": 5
},
{
"epoch": 0.025510204081632654,
"grad_norm": 4753.9239460531635,
"learning_rate": 7.5e-05,
"loss": 27.6128,
"step": 10
},
{
"epoch": 0.03826530612244898,
"grad_norm": 7672.954437161885,
"learning_rate": 0.0001125,
"loss": 118.424,
"step": 15
},
{
"epoch": 0.05102040816326531,
"grad_norm": 2658.2281406370403,
"learning_rate": 0.00015,
"loss": 36.7995,
"step": 20
},
{
"epoch": 0.06377551020408163,
"grad_norm": 301.82927722978826,
"learning_rate": 0.00018749999999999998,
"loss": 25.8987,
"step": 25
},
{
"epoch": 0.07653061224489796,
"grad_norm": 261.2743806851815,
"learning_rate": 0.000225,
"loss": 19.7068,
"step": 30
},
{
"epoch": 0.08928571428571429,
"grad_norm": 170.80865284544745,
"learning_rate": 0.0002625,
"loss": 16.2639,
"step": 35
},
{
"epoch": 0.10204081632653061,
"grad_norm": 371.9839372489695,
"learning_rate": 0.0003,
"loss": 14.8666,
"step": 40
},
{
"epoch": 0.11479591836734694,
"grad_norm": 55.46043538057413,
"learning_rate": 0.00029985067118498503,
"loss": 13.883,
"step": 45
},
{
"epoch": 0.12755102040816327,
"grad_norm": 61.65936269712346,
"learning_rate": 0.00029940298206120687,
"loss": 11.3142,
"step": 50
},
{
"epoch": 0.14030612244897958,
"grad_norm": 49.78316202658068,
"learning_rate": 0.0002986578240004834,
"loss": 10.2446,
"step": 55
},
{
"epoch": 0.15306122448979592,
"grad_norm": 95.22883433955214,
"learning_rate": 0.0002976166806504174,
"loss": 9.2125,
"step": 60
},
{
"epoch": 0.16581632653061223,
"grad_norm": 87.6593544514716,
"learning_rate": 0.00029628162498037856,
"loss": 8.0024,
"step": 65
},
{
"epoch": 0.17857142857142858,
"grad_norm": 29.457366532379673,
"learning_rate": 0.000294655315154116,
"loss": 7.0794,
"step": 70
},
{
"epoch": 0.1913265306122449,
"grad_norm": 23.54866864598235,
"learning_rate": 0.0002927409892372191,
"loss": 6.428,
"step": 75
},
{
"epoch": 0.20408163265306123,
"grad_norm": 14.487277559435018,
"learning_rate": 0.00029054245874996426,
"loss": 6.0644,
"step": 80
},
{
"epoch": 0.21683673469387754,
"grad_norm": 11.612016698553127,
"learning_rate": 0.00028806410107838377,
"loss": 5.9737,
"step": 85
},
{
"epoch": 0.22959183673469388,
"grad_norm": 22.24847439069348,
"learning_rate": 0.00028531085075866815,
"loss": 5.6293,
"step": 90
},
{
"epoch": 0.2423469387755102,
"grad_norm": 25.32273350126865,
"learning_rate": 0.0002822881896522532,
"loss": 5.4368,
"step": 95
},
{
"epoch": 0.25510204081632654,
"grad_norm": 29.73658242449881,
"learning_rate": 0.000279002136031155,
"loss": 5.2007,
"step": 100
},
{
"epoch": 0.26785714285714285,
"grad_norm": 17.009329865460675,
"learning_rate": 0.0002754592325952845,
"loss": 4.9784,
"step": 105
},
{
"epoch": 0.28061224489795916,
"grad_norm": 17.65449666930728,
"learning_rate": 0.00027166653344559883,
"loss": 5.1761,
"step": 110
},
{
"epoch": 0.29336734693877553,
"grad_norm": 25.60749450078011,
"learning_rate": 0.0002676315900390278,
"loss": 4.981,
"step": 115
},
{
"epoch": 0.30612244897959184,
"grad_norm": 19.37666875758449,
"learning_rate": 0.00026336243615313873,
"loss": 4.883,
"step": 120
},
{
"epoch": 0.31887755102040816,
"grad_norm": 25.223445103235044,
"learning_rate": 0.00025886757189047695,
"loss": 4.6906,
"step": 125
},
{
"epoch": 0.33163265306122447,
"grad_norm": 24.827221258799884,
"learning_rate": 0.0002541559467544291,
"loss": 4.5962,
"step": 130
},
{
"epoch": 0.34438775510204084,
"grad_norm": 19.172749908950284,
"learning_rate": 0.00024923694183030657,
"loss": 4.4971,
"step": 135
},
{
"epoch": 0.35714285714285715,
"grad_norm": 17.803290735515485,
"learning_rate": 0.0002441203511071278,
"loss": 4.3923,
"step": 140
},
{
"epoch": 0.36989795918367346,
"grad_norm": 15.152588588737128,
"learning_rate": 0.00023881636197728743,
"loss": 4.2507,
"step": 145
},
{
"epoch": 0.3826530612244898,
"grad_norm": 28.266308285286772,
"learning_rate": 0.0002333355349529403,
"loss": 4.1071,
"step": 150
},
{
"epoch": 0.39540816326530615,
"grad_norm": 13.058293075351123,
"learning_rate": 0.00022768878263948445,
"loss": 4.0515,
"step": 155
},
{
"epoch": 0.40816326530612246,
"grad_norm": 14.87484760188348,
"learning_rate": 0.00022188734800800852,
"loss": 3.9059,
"step": 160
},
{
"epoch": 0.42091836734693877,
"grad_norm": 12.57655967452742,
"learning_rate": 0.00021594278200996457,
"loss": 3.876,
"step": 165
},
{
"epoch": 0.4336734693877551,
"grad_norm": 18.644576504823885,
"learning_rate": 0.00020986692057863607,
"loss": 3.703,
"step": 170
},
{
"epoch": 0.44642857142857145,
"grad_norm": 6.3705323701789265,
"learning_rate": 0.000203671861063193,
"loss": 3.6606,
"step": 175
},
{
"epoch": 0.45918367346938777,
"grad_norm": 9.945740907460738,
"learning_rate": 0.00019736993814225374,
"loss": 3.6781,
"step": 180
},
{
"epoch": 0.4719387755102041,
"grad_norm": 5.28597104720817,
"learning_rate": 0.00019097369926491297,
"loss": 3.5963,
"step": 185
},
{
"epoch": 0.4846938775510204,
"grad_norm": 5.570474507029548,
"learning_rate": 0.00018449587966813208,
"loss": 3.5206,
"step": 190
},
{
"epoch": 0.49744897959183676,
"grad_norm": 5.769299754282368,
"learning_rate": 0.00017794937702023467,
"loss": 3.3751,
"step": 195
},
{
"epoch": 0.5102040816326531,
"grad_norm": 7.514106159557929,
"learning_rate": 0.00017134722574099276,
"loss": 3.3315,
"step": 200
},
{
"epoch": 0.5229591836734694,
"grad_norm": 5.467040990097222,
"learning_rate": 0.0001647025710494341,
"loss": 3.2297,
"step": 205
},
{
"epoch": 0.5357142857142857,
"grad_norm": 7.752282985206345,
"learning_rate": 0.00015802864279104223,
"loss": 3.1714,
"step": 210
},
{
"epoch": 0.548469387755102,
"grad_norm": 4.902568385413757,
"learning_rate": 0.0001513387290964616,
"loss": 3.076,
"step": 215
},
{
"epoch": 0.5612244897959183,
"grad_norm": 8.809433652257214,
"learning_rate": 0.00014464614992415294,
"loss": 3.0378,
"step": 220
},
{
"epoch": 0.5739795918367347,
"grad_norm": 5.62075198192736,
"learning_rate": 0.00013796423053967887,
"loss": 2.946,
"step": 225
},
{
"epoch": 0.5867346938775511,
"grad_norm": 7.091999165572766,
"learning_rate": 0.0001313062749844221,
"loss": 2.9117,
"step": 230
},
{
"epoch": 0.5994897959183674,
"grad_norm": 9.664621289964327,
"learning_rate": 0.00012468553958656264,
"loss": 2.7779,
"step": 235
},
{
"epoch": 0.6122448979591837,
"grad_norm": 8.079291607422778,
"learning_rate": 0.00011811520656705348,
"loss": 2.7627,
"step": 240
},
{
"epoch": 0.625,
"grad_norm": 9.132106023308898,
"learning_rate": 0.00011160835779314889,
"loss": 2.7464,
"step": 245
},
{
"epoch": 0.6377551020408163,
"grad_norm": 12.688497775305251,
"learning_rate": 0.00010517794873174064,
"loss": 2.6784,
"step": 250
},
{
"epoch": 0.6505102040816326,
"grad_norm": 7.508821446542865,
"learning_rate": 9.883678265436473e-05,
"loss": 2.6542,
"step": 255
},
{
"epoch": 0.6632653061224489,
"grad_norm": 7.288150995036509,
"learning_rate": 9.259748514523653e-05,
"loss": 2.5675,
"step": 260
},
{
"epoch": 0.6760204081632653,
"grad_norm": 2.598306498532322,
"learning_rate": 8.647247896307018e-05,
"loss": 2.5179,
"step": 265
},
{
"epoch": 0.6887755102040817,
"grad_norm": 2.9340097554582947,
"learning_rate": 8.047395930673417e-05,
"loss": 2.4673,
"step": 270
},
{
"epoch": 0.701530612244898,
"grad_norm": 3.520741961544552,
"learning_rate": 7.46138695339903e-05,
"loss": 2.5285,
"step": 275
},
{
"epoch": 0.7142857142857143,
"grad_norm": 4.635037745202294,
"learning_rate": 6.890387738166041e-05,
"loss": 2.3872,
"step": 280
},
{
"epoch": 0.7270408163265306,
"grad_norm": 3.987422126482862,
"learning_rate": 6.335535173456914e-05,
"loss": 2.3409,
"step": 285
},
{
"epoch": 0.7397959183673469,
"grad_norm": 3.0549621746230122,
"learning_rate": 5.79793399895161e-05,
"loss": 2.3254,
"step": 290
},
{
"epoch": 0.7525510204081632,
"grad_norm": 2.527266957929868,
"learning_rate": 5.278654605934644e-05,
"loss": 2.3429,
"step": 295
},
{
"epoch": 0.7653061224489796,
"grad_norm": 3.9761126497464083,
"learning_rate": 4.778730906091632e-05,
"loss": 2.2348,
"step": 300
},
{
"epoch": 0.7780612244897959,
"grad_norm": 2.498797778043332,
"learning_rate": 4.2991582729385174e-05,
"loss": 2.2421,
"step": 305
},
{
"epoch": 0.7908163265306123,
"grad_norm": 2.5462712929101894,
"learning_rate": 3.840891559982256e-05,
"loss": 2.2751,
"step": 310
},
{
"epoch": 0.8035714285714286,
"grad_norm": 2.307072702085812,
"learning_rate": 3.404843199558945e-05,
"loss": 2.1867,
"step": 315
},
{
"epoch": 0.8163265306122449,
"grad_norm": 5.263562323908358,
"learning_rate": 2.9918813861345952e-05,
"loss": 2.181,
"step": 320
},
{
"epoch": 0.8290816326530612,
"grad_norm": 2.0011040214883575,
"learning_rate": 2.6028283476858038e-05,
"loss": 2.1732,
"step": 325
},
{
"epoch": 0.8418367346938775,
"grad_norm": 2.2471501177416116,
"learning_rate": 2.238458708602039e-05,
"loss": 2.1108,
"step": 330
},
{
"epoch": 0.8545918367346939,
"grad_norm": 1.66846134386125,
"learning_rate": 1.8994979473690537e-05,
"loss": 2.1007,
"step": 335
},
{
"epoch": 0.8673469387755102,
"grad_norm": 2.0450866177774976,
"learning_rate": 1.5866209521043304e-05,
"loss": 2.0334,
"step": 340
},
{
"epoch": 0.8801020408163265,
"grad_norm": 2.1608778408402807,
"learning_rate": 1.3004506768205226e-05,
"loss": 2.0735,
"step": 345
},
{
"epoch": 0.8928571428571429,
"grad_norm": 2.1504307790627912,
"learning_rate": 1.0415569010922963e-05,
"loss": 2.0473,
"step": 350
},
{
"epoch": 0.9056122448979592,
"grad_norm": 1.6880900496760922,
"learning_rate": 8.104550955962469e-06,
"loss": 2.029,
"step": 355
},
{
"epoch": 0.9183673469387755,
"grad_norm": 1.7900392749998477,
"learning_rate": 6.076053957825411e-06,
"loss": 2.0533,
"step": 360
},
{
"epoch": 0.9311224489795918,
"grad_norm": 1.5178075882516209,
"learning_rate": 4.334116857218317e-06,
"loss": 1.9693,
"step": 365
},
{
"epoch": 0.9438775510204082,
"grad_norm": 1.6874487405997367,
"learning_rate": 2.882207939515435e-06,
"loss": 1.9791,
"step": 370
},
{
"epoch": 0.9566326530612245,
"grad_norm": 1.4244094850155935,
"learning_rate": 1.7232180292259369e-06,
"loss": 1.9566,
"step": 375
},
{
"epoch": 0.9693877551020408,
"grad_norm": 1.302432101303747,
"learning_rate": 8.594547342153979e-07,
"loss": 1.9451,
"step": 380
},
{
"epoch": 0.9821428571428571,
"grad_norm": 1.2049628347309629,
"learning_rate": 2.926378511411198e-07,
"loss": 1.9952,
"step": 385
},
{
"epoch": 0.9948979591836735,
"grad_norm": 1.2981702353996074,
"learning_rate": 2.3895941249507665e-08,
"loss": 1.9687,
"step": 390
},
{
"epoch": 1.0,
"eval_loss": 6.496990203857422,
"eval_runtime": 2.5179,
"eval_samples_per_second": 3.972,
"eval_steps_per_second": 0.397,
"step": 392
},
{
"epoch": 1.0,
"step": 392,
"total_flos": 13478110691328.0,
"train_loss": 7.205524507833987,
"train_runtime": 9216.1272,
"train_samples_per_second": 1.36,
"train_steps_per_second": 0.043
}
],
"logging_steps": 5,
"max_steps": 392,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 13478110691328.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}