{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 8.0,
  "eval_steps": 500,
  "global_step": 32000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.025,
      "grad_norm": 0.20501533150672913,
      "learning_rate": 0.000499987952239832,
      "loss": 1.3493,
      "step": 100
    },
    {
      "epoch": 0.05,
      "grad_norm": 0.17424988746643066,
      "learning_rate": 0.0004999518101205162,
      "loss": 1.1647,
      "step": 200
    },
    {
      "epoch": 0.075,
      "grad_norm": 0.24931852519512177,
      "learning_rate": 0.0004998915771255053,
      "loss": 1.1393,
      "step": 300
    },
    {
      "epoch": 0.1,
      "grad_norm": 0.2063228040933609,
      "learning_rate": 0.0004998072590601808,
      "loss": 1.1057,
      "step": 400
    },
    {
      "epoch": 0.125,
      "grad_norm": 0.23096567392349243,
      "learning_rate": 0.0004996988640512931,
      "loss": 1.1329,
      "step": 500
    },
    {
      "epoch": 0.15,
      "grad_norm": 0.19355718791484833,
      "learning_rate": 0.000499566402546179,
      "loss": 1.1168,
      "step": 600
    },
    {
      "epoch": 0.175,
      "grad_norm": 0.2221299558877945,
      "learning_rate": 0.0004994098873117539,
      "loss": 1.1316,
      "step": 700
    },
    {
      "epoch": 0.2,
      "grad_norm": 0.2073143720626831,
      "learning_rate": 0.000499229333433282,
      "loss": 1.1141,
      "step": 800
    },
    {
      "epoch": 0.225,
      "grad_norm": 0.21791169047355652,
      "learning_rate": 0.0004990247583129218,
      "loss": 1.1274,
      "step": 900
    },
    {
      "epoch": 0.25,
      "grad_norm": 0.21467778086662292,
      "learning_rate": 0.0004987961816680492,
      "loss": 1.0698,
      "step": 1000
    },
    {
      "epoch": 0.275,
      "grad_norm": 0.2048187404870987,
      "learning_rate": 0.0004985436255293571,
      "loss": 1.0928,
      "step": 1100
    },
    {
      "epoch": 0.3,
      "grad_norm": 0.21932852268218994,
      "learning_rate": 0.0004982671142387316,
      "loss": 1.0906,
      "step": 1200
    },
    {
      "epoch": 0.325,
      "grad_norm": 0.2301362156867981,
      "learning_rate": 0.0004979666744469065,
      "loss": 1.0949,
      "step": 1300
    },
    {
      "epoch": 0.35,
      "grad_norm": 0.2186669409275055,
      "learning_rate": 0.0004976423351108943,
      "loss": 1.0857,
      "step": 1400
    },
    {
      "epoch": 0.375,
      "grad_norm": 0.19886991381645203,
      "learning_rate": 0.0004972941274911952,
      "loss": 1.0642,
      "step": 1500
    },
    {
      "epoch": 0.4,
      "grad_norm": 0.23275479674339294,
      "learning_rate": 0.0004969220851487844,
      "loss": 1.0718,
      "step": 1600
    },
    {
      "epoch": 0.425,
      "grad_norm": 0.21335655450820923,
      "learning_rate": 0.0004965262439418772,
      "loss": 1.082,
      "step": 1700
    },
    {
      "epoch": 0.45,
      "grad_norm": 0.2357306331396103,
      "learning_rate": 0.0004961066420224729,
      "loss": 1.0937,
      "step": 1800
    },
    {
      "epoch": 0.475,
      "grad_norm": 0.23492847383022308,
      "learning_rate": 0.000495663319832678,
      "loss": 1.0846,
      "step": 1900
    },
    {
      "epoch": 0.5,
      "grad_norm": 0.2015356421470642,
      "learning_rate": 0.0004951963201008077,
      "loss": 1.0829,
      "step": 2000
    },
    {
      "epoch": 0.525,
      "grad_norm": 0.24353289604187012,
      "learning_rate": 0.0004947056878372681,
      "loss": 1.0688,
      "step": 2100
    },
    {
      "epoch": 0.55,
      "grad_norm": 0.27955085039138794,
      "learning_rate": 0.0004941914703302181,
      "loss": 1.0644,
      "step": 2200
    },
    {
      "epoch": 0.575,
      "grad_norm": 0.24444086849689484,
      "learning_rate": 0.0004936537171410112,
      "loss": 1.0946,
      "step": 2300
    },
    {
      "epoch": 0.6,
      "grad_norm": 0.22312547266483307,
      "learning_rate": 0.0004930924800994192,
      "loss": 1.0636,
      "step": 2400
    },
    {
      "epoch": 0.625,
      "grad_norm": 0.22395829856395721,
      "learning_rate": 0.000492507813298636,
      "loss": 1.0858,
      "step": 2500
    },
    {
      "epoch": 0.65,
      "grad_norm": 0.2249772697687149,
      "learning_rate": 0.0004918997730900649,
      "loss": 1.0885,
      "step": 2600
    },
    {
      "epoch": 0.675,
      "grad_norm": 0.22566111385822296,
      "learning_rate": 0.0004912684180778869,
      "loss": 1.0647,
      "step": 2700
    },
    {
      "epoch": 0.7,
      "grad_norm": 0.256917804479599,
      "learning_rate": 0.0004906138091134118,
      "loss": 1.0484,
      "step": 2800
    },
    {
      "epoch": 0.725,
      "grad_norm": 0.2598745822906494,
      "learning_rate": 0.0004899360092892143,
      "loss": 1.063,
      "step": 2900
    },
    {
      "epoch": 0.75,
      "grad_norm": 0.2576007843017578,
      "learning_rate": 0.0004892350839330522,
      "loss": 1.0585,
      "step": 3000
    },
    {
      "epoch": 0.775,
      "grad_norm": 0.2577688992023468,
      "learning_rate": 0.0004885111006015701,
      "loss": 1.0452,
      "step": 3100
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.2334214746952057,
      "learning_rate": 0.0004877641290737884,
      "loss": 1.0473,
      "step": 3200
    },
    {
      "epoch": 0.825,
      "grad_norm": 0.2869343161582947,
      "learning_rate": 0.0004869942413443776,
      "loss": 1.0412,
      "step": 3300
    },
    {
      "epoch": 0.85,
      "grad_norm": 0.26476991176605225,
      "learning_rate": 0.00048620151161671955,
      "loss": 1.064,
      "step": 3400
    },
    {
      "epoch": 0.875,
      "grad_norm": 0.2549433708190918,
      "learning_rate": 0.0004853860162957552,
      "loss": 1.0557,
      "step": 3500
    },
    {
      "epoch": 0.9,
      "grad_norm": 0.26020538806915283,
      "learning_rate": 0.0004845478339806211,
      "loss": 1.0424,
      "step": 3600
    },
    {
      "epoch": 0.925,
      "grad_norm": 0.2770324945449829,
      "learning_rate": 0.0004836870454570731,
      "loss": 1.0552,
      "step": 3700
    },
    {
      "epoch": 0.95,
      "grad_norm": 0.2561758756637573,
      "learning_rate": 0.00048280373368970086,
      "loss": 1.0336,
      "step": 3800
    },
    {
      "epoch": 0.975,
      "grad_norm": 0.25469744205474854,
      "learning_rate": 0.000481897983813931,
      "loss": 1.06,
      "step": 3900
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.2685888707637787,
      "learning_rate": 0.0004809698831278217,
      "loss": 1.0151,
      "step": 4000
    },
    {
      "epoch": 1.0,
      "eval_loss": 1.0406872034072876,
      "eval_runtime": 152.4194,
      "eval_samples_per_second": 26.243,
      "eval_steps_per_second": 6.561,
      "step": 4000
    },
    {
      "epoch": 1.025,
      "grad_norm": 0.28361451625823975,
      "learning_rate": 0.00048001952108364876,
      "loss": 1.0277,
      "step": 4100
    },
    {
      "epoch": 1.05,
      "grad_norm": 0.26193392276763916,
      "learning_rate": 0.00047904698927928404,
      "loss": 1.0187,
      "step": 4200
    },
    {
      "epoch": 1.075,
      "grad_norm": 0.2700245678424835,
      "learning_rate": 0.0004780523814493669,
      "loss": 1.0325,
      "step": 4300
    },
    {
      "epoch": 1.1,
      "grad_norm": 0.2610750198364258,
      "learning_rate": 0.00047703579345627036,
      "loss": 1.0182,
      "step": 4400
    },
    {
      "epoch": 1.125,
      "grad_norm": 0.2965630888938904,
      "learning_rate": 0.0004759973232808609,
      "loss": 1.0105,
      "step": 4500
    },
    {
      "epoch": 1.15,
      "grad_norm": 0.25167468190193176,
      "learning_rate": 0.0004749370710130554,
      "loss": 1.0404,
      "step": 4600
    },
    {
      "epoch": 1.175,
      "grad_norm": 0.26605212688446045,
      "learning_rate": 0.0004738551388421742,
      "loss": 1.0291,
      "step": 4700
    },
    {
      "epoch": 1.2,
      "grad_norm": 0.25520554184913635,
      "learning_rate": 0.00047275163104709196,
      "loss": 1.019,
      "step": 4800
    },
    {
      "epoch": 1.225,
      "grad_norm": 0.26057741045951843,
      "learning_rate": 0.00047162665398618666,
      "loss": 1.0309,
      "step": 4900
    },
    {
      "epoch": 1.25,
      "grad_norm": 0.2987058758735657,
      "learning_rate": 0.00047048031608708875,
      "loss": 1.0127,
      "step": 5000
    },
    {
      "epoch": 1.275,
      "grad_norm": 0.2619779706001282,
      "learning_rate": 0.00046931272783623106,
      "loss": 1.0191,
      "step": 5100
    },
    {
      "epoch": 1.3,
      "grad_norm": 0.27090466022491455,
      "learning_rate": 0.0004681240017681993,
      "loss": 1.0416,
      "step": 5200
    },
    {
      "epoch": 1.325,
      "grad_norm": 0.24344772100448608,
      "learning_rate": 0.00046691425245488607,
      "loss": 1.0075,
      "step": 5300
    },
    {
      "epoch": 1.35,
      "grad_norm": 0.2597128450870514,
      "learning_rate": 0.00046568359649444796,
      "loss": 1.0147,
      "step": 5400
    },
    {
      "epoch": 1.375,
      "grad_norm": 0.3321911692619324,
      "learning_rate": 0.00046443215250006805,
      "loss": 1.0277,
      "step": 5500
    },
    {
      "epoch": 1.4,
      "grad_norm": 0.2736588716506958,
      "learning_rate": 0.00046316004108852305,
      "loss": 1.041,
      "step": 5600
    },
    {
      "epoch": 1.425,
      "grad_norm": 0.30011600255966187,
      "learning_rate": 0.0004618673848685586,
      "loss": 1.006,
      "step": 5700
    },
    {
      "epoch": 1.45,
      "grad_norm": 0.27296823263168335,
      "learning_rate": 0.0004605543084290716,
      "loss": 1.0207,
      "step": 5800
    },
    {
      "epoch": 1.475,
      "grad_norm": 0.2965295910835266,
      "learning_rate": 0.0004592209383271023,
      "loss": 1.0306,
      "step": 5900
    },
    {
      "epoch": 1.5,
      "grad_norm": 0.27299028635025024,
      "learning_rate": 0.00045786740307563633,
      "loss": 1.0296,
      "step": 6000
    },
    {
      "epoch": 1.525,
      "grad_norm": 0.2956341803073883,
      "learning_rate": 0.0004564938331312183,
      "loss": 0.9965,
      "step": 6100
    },
    {
      "epoch": 1.55,
      "grad_norm": 0.2554098665714264,
      "learning_rate": 0.0004551003608813784,
      "loss": 1.0232,
      "step": 6200
    },
    {
      "epoch": 1.575,
      "grad_norm": 0.2712690234184265,
      "learning_rate": 0.00045368712063187237,
      "loss": 1.02,
      "step": 6300
    },
    {
      "epoch": 1.6,
      "grad_norm": 0.31331920623779297,
      "learning_rate": 0.0004522542485937369,
      "loss": 0.971,
      "step": 6400
    },
    {
      "epoch": 1.625,
      "grad_norm": 0.3378168046474457,
      "learning_rate": 0.0004508018828701612,
      "loss": 1.0017,
      "step": 6500
    },
    {
      "epoch": 1.65,
      "grad_norm": 0.3126436471939087,
      "learning_rate": 0.0004493301634431768,
      "loss": 0.9969,
      "step": 6600
    },
    {
      "epoch": 1.675,
      "grad_norm": 0.2890426218509674,
      "learning_rate": 0.00044783923216016507,
      "loss": 1.0082,
      "step": 6700
    },
    {
      "epoch": 1.7,
      "grad_norm": 0.3303642272949219,
      "learning_rate": 0.0004463292327201862,
      "loss": 0.9989,
      "step": 6800
    },
    {
      "epoch": 1.725,
      "grad_norm": 0.3025239408016205,
      "learning_rate": 0.00044480031066012916,
      "loss": 1.0305,
      "step": 6900
    },
    {
      "epoch": 1.75,
      "grad_norm": 0.2934839725494385,
      "learning_rate": 0.0004432526133406842,
      "loss": 1.0003,
      "step": 7000
    },
    {
      "epoch": 1.775,
      "grad_norm": 0.31030675768852234,
      "learning_rate": 0.00044168628993214036,
      "loss": 0.982,
      "step": 7100
    },
    {
      "epoch": 1.8,
      "grad_norm": 0.30027347803115845,
      "learning_rate": 0.0004401014914000078,
      "loss": 0.9825,
      "step": 7200
    },
    {
      "epoch": 1.825,
      "grad_norm": 0.2832087278366089,
      "learning_rate": 0.00043849837049046735,
      "loss": 1.007,
      "step": 7300
    },
    {
      "epoch": 1.85,
      "grad_norm": 0.30503541231155396,
      "learning_rate": 0.00043687708171564923,
      "loss": 1.0023,
      "step": 7400
    },
    {
      "epoch": 1.875,
      "grad_norm": 0.30458569526672363,
      "learning_rate": 0.0004352377813387398,
      "loss": 1.0006,
      "step": 7500
    },
    {
      "epoch": 1.9,
      "grad_norm": 0.3471212089061737,
      "learning_rate": 0.0004335806273589214,
      "loss": 1.0239,
      "step": 7600
    },
    {
      "epoch": 1.925,
      "grad_norm": 0.29310837388038635,
      "learning_rate": 0.00043190577949614375,
      "loss": 1.0129,
      "step": 7700
    },
    {
      "epoch": 1.95,
      "grad_norm": 0.294300377368927,
      "learning_rate": 0.0004302133991757297,
      "loss": 0.9956,
      "step": 7800
    },
    {
      "epoch": 1.975,
      "grad_norm": 0.3151143193244934,
      "learning_rate": 0.00042850364951281707,
      "loss": 1.0224,
      "step": 7900
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.3241264820098877,
      "learning_rate": 0.00042677669529663686,
      "loss": 1.0234,
      "step": 8000
    },
    {
      "epoch": 2.0,
      "eval_loss": 1.0086950063705444,
      "eval_runtime": 152.329,
      "eval_samples_per_second": 26.259,
      "eval_steps_per_second": 6.565,
      "step": 8000
    },
    {
      "epoch": 2.025,
      "grad_norm": 0.3218720853328705,
      "learning_rate": 0.0004250327029746309,
      "loss": 1.0029,
      "step": 8100
    },
    {
      "epoch": 2.05,
      "grad_norm": 0.31330084800720215,
      "learning_rate": 0.000423271840636409,
      "loss": 0.9844,
      "step": 8200
    },
    {
      "epoch": 2.075,
      "grad_norm": 0.30206719040870667,
      "learning_rate": 0.00042149427799754817,
      "loss": 0.989,
      "step": 8300
    },
    {
      "epoch": 2.1,
      "grad_norm": 0.303648978471756,
      "learning_rate": 0.00041970018638323546,
      "loss": 0.9543,
      "step": 8400
    },
    {
      "epoch": 2.125,
      "grad_norm": 0.317037433385849,
      "learning_rate": 0.00041788973871175465,
      "loss": 0.9945,
      "step": 8500
    },
    {
      "epoch": 2.15,
      "grad_norm": 0.3210434913635254,
      "learning_rate": 0.00041606310947782046,
      "loss": 0.988,
      "step": 8600
    },
    {
      "epoch": 2.175,
      "grad_norm": 0.33763307332992554,
      "learning_rate": 0.00041422047473576033,
      "loss": 1.0066,
      "step": 8700
    },
    {
      "epoch": 2.2,
      "grad_norm": 0.30534419417381287,
      "learning_rate": 0.0004123620120825459,
      "loss": 1.0033,
      "step": 8800
    },
    {
      "epoch": 2.225,
      "grad_norm": 0.3363495171070099,
      "learning_rate": 0.00041048790064067577,
      "loss": 0.993,
      "step": 8900
    },
    {
      "epoch": 2.25,
      "grad_norm": 0.2920531928539276,
      "learning_rate": 0.0004085983210409114,
      "loss": 0.9844,
      "step": 9000
    },
    {
      "epoch": 2.275,
      "grad_norm": 0.33446577191352844,
      "learning_rate": 0.0004066934554048674,
      "loss": 0.9846,
      "step": 9100
    },
    {
      "epoch": 2.3,
      "grad_norm": 0.33012858033180237,
      "learning_rate": 0.00040477348732745853,
      "loss": 0.9858,
      "step": 9200
    },
    {
      "epoch": 2.325,
      "grad_norm": 0.3301050364971161,
      "learning_rate": 0.0004028386018592041,
      "loss": 0.9787,
      "step": 9300
    },
    {
      "epoch": 2.35,
      "grad_norm": 0.3368571400642395,
      "learning_rate": 0.0004008889854883929,
      "loss": 0.9912,
      "step": 9400
    },
    {
      "epoch": 2.375,
      "grad_norm": 0.285281777381897,
      "learning_rate": 0.0003989248261231084,
      "loss": 0.9655,
      "step": 9500
    },
    {
      "epoch": 2.4,
      "grad_norm": 0.36711010336875916,
      "learning_rate": 0.0003969463130731183,
      "loss": 0.9753,
      "step": 9600
    },
    {
      "epoch": 2.425,
      "grad_norm": 0.28881680965423584,
      "learning_rate": 0.00039495363703162843,
      "loss": 0.978,
      "step": 9700
    },
    {
      "epoch": 2.45,
      "grad_norm": 0.3270690441131592,
      "learning_rate": 0.000392946990056903,
      "loss": 1.0242,
      "step": 9800
    },
    {
      "epoch": 2.475,
      "grad_norm": 0.3338991105556488,
      "learning_rate": 0.00039092656555375416,
      "loss": 1.0066,
      "step": 9900
    },
    {
      "epoch": 2.5,
      "grad_norm": 0.34082475304603577,
      "learning_rate": 0.00038889255825490053,
      "loss": 0.9781,
      "step": 10000
    },
    {
      "epoch": 2.525,
      "grad_norm": 0.34000107645988464,
      "learning_rate": 0.0003868451642021992,
      "loss": 0.9745,
      "step": 10100
    },
    {
      "epoch": 2.55,
      "grad_norm": 0.3570154309272766,
      "learning_rate": 0.0003847845807277501,
      "loss": 0.9759,
      "step": 10200
    },
    {
      "epoch": 2.575,
      "grad_norm": 0.33699771761894226,
      "learning_rate": 0.0003827110064348773,
      "loss": 0.9882,
      "step": 10300
    },
    {
      "epoch": 2.6,
      "grad_norm": 0.29268336296081543,
      "learning_rate": 0.0003806246411789872,
      "loss": 0.9745,
      "step": 10400
    },
    {
      "epoch": 2.625,
      "grad_norm": 0.3088798224925995,
      "learning_rate": 0.0003785256860483054,
      "loss": 0.9501,
      "step": 10500
    },
    {
      "epoch": 2.65,
      "grad_norm": 0.34749528765678406,
      "learning_rate": 0.0003764143433444962,
      "loss": 0.9702,
      "step": 10600
    },
    {
      "epoch": 2.675,
      "grad_norm": 0.33005061745643616,
      "learning_rate": 0.0003742908165631636,
      "loss": 0.987,
      "step": 10700
    },
    {
      "epoch": 2.7,
      "grad_norm": 0.3165997266769409,
      "learning_rate": 0.0003721553103742388,
      "loss": 1.0125,
      "step": 10800
    },
    {
      "epoch": 2.725,
      "grad_norm": 0.3213465213775635,
      "learning_rate": 0.0003700080306022528,
      "loss": 0.9613,
      "step": 10900
    },
    {
      "epoch": 2.75,
      "grad_norm": 0.33442971110343933,
      "learning_rate": 0.0003678491842064995,
      "loss": 0.9825,
      "step": 11000
    },
    {
      "epoch": 2.775,
      "grad_norm": 0.2766638994216919,
      "learning_rate": 0.00036567897926108756,
      "loss": 0.9473,
      "step": 11100
    },
    {
      "epoch": 2.8,
      "grad_norm": 0.3419225215911865,
      "learning_rate": 0.00036349762493488667,
      "loss": 0.9462,
      "step": 11200
    },
    {
      "epoch": 2.825,
      "grad_norm": 0.3595256209373474,
      "learning_rate": 0.0003613053314713671,
      "loss": 0.9879,
      "step": 11300
    },
    {
      "epoch": 2.85,
      "grad_norm": 0.3483733534812927,
      "learning_rate": 0.0003591023101683355,
      "loss": 0.9997,
      "step": 11400
    },
    {
      "epoch": 2.875,
      "grad_norm": 0.32273510098457336,
      "learning_rate": 0.0003568887733575705,
      "loss": 1.0057,
      "step": 11500
    },
    {
      "epoch": 2.9,
      "grad_norm": 0.31241312623023987,
      "learning_rate": 0.00035466493438435703,
      "loss": 0.9675,
      "step": 11600
    },
    {
      "epoch": 2.925,
      "grad_norm": 0.37985849380493164,
      "learning_rate": 0.0003524310075869239,
      "loss": 0.9927,
      "step": 11700
    },
    {
      "epoch": 2.95,
      "grad_norm": 0.33704909682273865,
      "learning_rate": 0.0003501872082757852,
      "loss": 0.9779,
      "step": 11800
    },
    {
      "epoch": 2.975,
      "grad_norm": 0.4041268229484558,
      "learning_rate": 0.000347933752712989,
      "loss": 0.9739,
      "step": 11900
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.3415277302265167,
      "learning_rate": 0.0003456708580912725,
      "loss": 0.9995,
      "step": 12000
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.9921265244483948,
      "eval_runtime": 153.1095,
      "eval_samples_per_second": 26.125,
      "eval_steps_per_second": 6.531,
      "step": 12000
    },
    {
      "epoch": 3.025,
      "grad_norm": 0.3260078430175781,
      "learning_rate": 0.0003433987425131291,
      "loss": 0.969,
      "step": 12100
    },
    {
      "epoch": 3.05,
      "grad_norm": 0.3035444915294647,
      "learning_rate": 0.0003411176249697875,
      "loss": 0.9636,
      "step": 12200
    },
    {
      "epoch": 3.075,
      "grad_norm": 0.3195238411426544,
      "learning_rate": 0.00033882772532010404,
      "loss": 0.9522,
      "step": 12300
    },
    {
      "epoch": 3.1,
      "grad_norm": 0.33937081694602966,
      "learning_rate": 0.0003365292642693733,
      "loss": 0.9425,
      "step": 12400
    },
    {
      "epoch": 3.125,
      "grad_norm": 0.37307849526405334,
      "learning_rate": 0.00033422246334805503,
      "loss": 0.9683,
      "step": 12500
    },
    {
      "epoch": 3.15,
      "grad_norm": 0.3180018365383148,
      "learning_rate": 0.0003319075448904234,
      "loss": 0.9761,
      "step": 12600
    },
    {
      "epoch": 3.175,
      "grad_norm": 0.3785949647426605,
      "learning_rate": 0.00032958473201313745,
      "loss": 0.9748,
      "step": 12700
    },
    {
      "epoch": 3.2,
      "grad_norm": 0.3146439790725708,
      "learning_rate": 0.00032725424859373687,
      "loss": 0.9657,
      "step": 12800
    },
    {
      "epoch": 3.225,
      "grad_norm": 0.3667052686214447,
      "learning_rate": 0.00032491631924906416,
      "loss": 0.9685,
      "step": 12900
    },
    {
      "epoch": 3.25,
      "grad_norm": 0.34998300671577454,
      "learning_rate": 0.00032257116931361555,
      "loss": 0.9821,
      "step": 13000
    },
    {
      "epoch": 3.275,
      "grad_norm": 0.34160691499710083,
      "learning_rate": 0.00032021902481782304,
      "loss": 0.9536,
      "step": 13100
    },
    {
      "epoch": 3.3,
      "grad_norm": 0.35554495453834534,
      "learning_rate": 0.00031786011246626855,
      "loss": 0.9626,
      "step": 13200
    },
    {
      "epoch": 3.325,
      "grad_norm": 0.373349666595459,
      "learning_rate": 0.0003154946596158343,
      "loss": 0.9926,
      "step": 13300
    },
    {
      "epoch": 3.35,
      "grad_norm": 0.38320091366767883,
      "learning_rate": 0.0003131228942537895,
      "loss": 0.9787,
      "step": 13400
    },
    {
      "epoch": 3.375,
      "grad_norm": 0.3836865723133087,
      "learning_rate": 0.000310745044975816,
      "loss": 0.9583,
      "step": 13500
    },
    {
      "epoch": 3.4,
      "grad_norm": 0.3358447551727295,
      "learning_rate": 0.0003083613409639764,
      "loss": 0.953,
      "step": 13600
    },
    {
      "epoch": 3.425,
      "grad_norm": 0.3501192629337311,
      "learning_rate": 0.00030597201196462466,
      "loss": 0.9346,
      "step": 13700
    },
    {
      "epoch": 3.45,
      "grad_norm": 0.3595263957977295,
      "learning_rate": 0.00030357728826626266,
      "loss": 0.9656,
      "step": 13800
    },
    {
      "epoch": 3.475,
      "grad_norm": 0.3395259380340576,
      "learning_rate": 0.00030117740067734495,
      "loss": 0.9835,
      "step": 13900
    },
    {
      "epoch": 3.5,
      "grad_norm": 0.36751946806907654,
      "learning_rate": 0.0002987725805040321,
      "loss": 0.9465,
      "step": 14000
    },
    {
      "epoch": 3.525,
      "grad_norm": 0.348893404006958,
      "learning_rate": 0.0002963630595278977,
      "loss": 0.9564,
      "step": 14100
    },
    {
      "epoch": 3.55,
      "grad_norm": 0.39346957206726074,
      "learning_rate": 0.0002939490699835887,
      "loss": 0.9508,
      "step": 14200
    },
    {
      "epoch": 3.575,
      "grad_norm": 0.33249151706695557,
      "learning_rate": 0.00029153084453644135,
      "loss": 0.9757,
      "step": 14300
    },
    {
      "epoch": 3.6,
      "grad_norm": 0.3582550585269928,
      "learning_rate": 0.00028910861626005774,
      "loss": 0.9552,
      "step": 14400
    },
    {
      "epoch": 3.625,
      "grad_norm": 0.3562643229961395,
      "learning_rate": 0.00028668261861384045,
      "loss": 0.95,
      "step": 14500
    },
    {
      "epoch": 3.65,
      "grad_norm": 0.3955428898334503,
      "learning_rate": 0.00028425308542049207,
      "loss": 0.9699,
      "step": 14600
    },
    {
      "epoch": 3.675,
      "grad_norm": 0.38744592666625977,
      "learning_rate": 0.0002818202508434783,
      "loss": 0.9838,
      "step": 14700
    },
    {
      "epoch": 3.7,
      "grad_norm": 0.404632031917572,
      "learning_rate": 0.00027938434936445943,
      "loss": 0.9589,
      "step": 14800
    },
    {
      "epoch": 3.725,
      "grad_norm": 0.36033356189727783,
      "learning_rate": 0.00027694561576068985,
      "loss": 0.973,
      "step": 14900
    },
    {
      "epoch": 3.75,
      "grad_norm": 0.3457838296890259,
      "learning_rate": 0.0002745042850823902,
      "loss": 0.9801,
      "step": 15000
    },
    {
      "epoch": 3.775,
      "grad_norm": 0.35734304785728455,
      "learning_rate": 0.00027206059263009243,
      "loss": 0.9469,
      "step": 15100
    },
    {
      "epoch": 3.8,
      "grad_norm": 0.3542312681674957,
      "learning_rate": 0.00026961477393196127,
      "loss": 0.9336,
      "step": 15200
    },
    {
      "epoch": 3.825,
      "grad_norm": 0.3671669065952301,
      "learning_rate": 0.0002671670647210934,
      "loss": 0.9793,
      "step": 15300
    },
    {
      "epoch": 3.85,
      "grad_norm": 0.357881098985672,
      "learning_rate": 0.00026471770091279724,
      "loss": 0.9544,
      "step": 15400
    },
    {
      "epoch": 3.875,
      "grad_norm": 0.31071895360946655,
      "learning_rate": 0.00026226691858185456,
      "loss": 0.9656,
      "step": 15500
    },
    {
      "epoch": 3.9,
      "grad_norm": 0.34388652443885803,
      "learning_rate": 0.00025981495393976716,
      "loss": 0.96,
      "step": 15600
    },
    {
      "epoch": 3.925,
      "grad_norm": 0.3791946768760681,
      "learning_rate": 0.00025736204331199084,
      "loss": 0.9797,
      "step": 15700
    },
    {
      "epoch": 3.95,
      "grad_norm": 0.3630838096141815,
      "learning_rate": 0.00025490842311515704,
      "loss": 0.9592,
      "step": 15800
    },
    {
      "epoch": 3.975,
      "grad_norm": 0.3712921440601349,
      "learning_rate": 0.0002524543298342875,
      "loss": 0.9731,
      "step": 15900
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.4159054756164551,
      "learning_rate": 0.00025,
      "loss": 0.9528,
      "step": 16000
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.9823833107948303,
      "eval_runtime": 152.8782,
      "eval_samples_per_second": 26.165,
      "eval_steps_per_second": 6.541,
      "step": 16000
    },
    {
      "epoch": 4.025,
      "grad_norm": 0.39106351137161255,
      "learning_rate": 0.0002475456701657126,
      "loss": 0.9814,
      "step": 16100
    },
    {
      "epoch": 4.05,
      "grad_norm": 0.36192214488983154,
      "learning_rate": 0.00024509157688484297,
      "loss": 0.9369,
      "step": 16200
    },
    {
      "epoch": 4.075,
      "grad_norm": 0.3404277563095093,
      "learning_rate": 0.0002426379566880092,
      "loss": 0.9501,
      "step": 16300
    },
    {
      "epoch": 4.1,
      "grad_norm": 0.3791425824165344,
      "learning_rate": 0.00024018504606023293,
      "loss": 0.9336,
      "step": 16400
    },
    {
      "epoch": 4.125,
      "grad_norm": 0.4009718894958496,
      "learning_rate": 0.0002377330814181455,
      "loss": 0.9487,
      "step": 16500
    },
    {
      "epoch": 4.15,
      "grad_norm": 0.37880653142929077,
      "learning_rate": 0.00023528229908720272,
      "loss": 0.9542,
      "step": 16600
    },
    {
      "epoch": 4.175,
      "grad_norm": 0.3936653733253479,
      "learning_rate": 0.00023283293527890658,
      "loss": 0.9388,
      "step": 16700
    },
    {
      "epoch": 4.2,
      "grad_norm": 0.33316799998283386,
      "learning_rate": 0.0002303852260680388,
      "loss": 0.943,
      "step": 16800
    },
    {
      "epoch": 4.225,
      "grad_norm": 0.3236179053783417,
      "learning_rate": 0.00022793940736990766,
      "loss": 0.9044,
      "step": 16900
    },
    {
      "epoch": 4.25,
      "grad_norm": 0.3429449200630188,
      "learning_rate": 0.00022549571491760985,
      "loss": 0.9624,
      "step": 17000
    },
    {
      "epoch": 4.275,
      "grad_norm": 0.35557985305786133,
      "learning_rate": 0.00022305438423931017,
      "loss": 0.9226,
      "step": 17100
    },
    {
      "epoch": 4.3,
      "grad_norm": 1.0042773485183716,
      "learning_rate": 0.00022061565063554063,
      "loss": 0.9472,
      "step": 17200
    },
    {
      "epoch": 4.325,
      "grad_norm": 0.3724925220012665,
      "learning_rate": 0.00021817974915652172,
      "loss": 0.947,
      "step": 17300
    },
    {
      "epoch": 4.35,
      "grad_norm": 0.3926277756690979,
      "learning_rate": 0.00021574691457950805,
      "loss": 0.9478,
      "step": 17400
    },
    {
      "epoch": 4.375,
      "grad_norm": 0.38053977489471436,
      "learning_rate": 0.00021331738138615958,
      "loss": 0.9801,
      "step": 17500
    },
    {
      "epoch": 4.4,
      "grad_norm": 0.36115145683288574,
      "learning_rate": 0.00021089138373994224,
      "loss": 0.9619,
      "step": 17600
    },
    {
      "epoch": 4.425,
      "grad_norm": 0.35729482769966125,
      "learning_rate": 0.0002084691554635587,
      "loss": 0.9549,
      "step": 17700
    },
    {
      "epoch": 4.45,
      "grad_norm": 0.3762321174144745,
      "learning_rate": 0.00020605093001641137,
      "loss": 0.9359,
      "step": 17800
    },
    {
      "epoch": 4.475,
      "grad_norm": 0.3317940831184387,
      "learning_rate": 0.00020363694047210228,
      "loss": 0.9571,
      "step": 17900
    },
    {
      "epoch": 4.5,
      "grad_norm": 0.3733656406402588,
      "learning_rate": 0.00020122741949596797,
      "loss": 0.9409,
      "step": 18000
    },
    {
      "epoch": 4.525,
      "grad_norm": 0.3790251612663269,
      "learning_rate": 0.00019882259932265512,
      "loss": 0.9768,
      "step": 18100
    },
    {
      "epoch": 4.55,
      "grad_norm": 0.37900087237358093,
      "learning_rate": 0.00019642271173373735,
      "loss": 0.9208,
      "step": 18200
    },
    {
      "epoch": 4.575,
      "grad_norm": 0.34171342849731445,
      "learning_rate": 0.00019402798803537538,
      "loss": 0.9831,
      "step": 18300
    },
    {
      "epoch": 4.6,
      "grad_norm": 0.39172932505607605,
      "learning_rate": 0.00019163865903602372,
      "loss": 0.941,
      "step": 18400
    },
    {
      "epoch": 4.625,
      "grad_norm": 0.36279597878456116,
      "learning_rate": 0.00018925495502418406,
      "loss": 0.9511,
      "step": 18500
    },
    {
      "epoch": 4.65,
      "grad_norm": 0.35040661692619324,
      "learning_rate": 0.00018687710574621051,
      "loss": 0.9713,
      "step": 18600
    },
    {
      "epoch": 4.675,
      "grad_norm": 0.35176095366477966,
      "learning_rate": 0.00018450534038416566,
      "loss": 0.9227,
      "step": 18700
    },
    {
      "epoch": 4.7,
      "grad_norm": 0.41925740242004395,
      "learning_rate": 0.00018213988753373146,
      "loss": 0.9454,
      "step": 18800
    },
    {
      "epoch": 4.725,
      "grad_norm": 0.4147469699382782,
      "learning_rate": 0.00017978097518217702,
      "loss": 0.9783,
      "step": 18900
    },
    {
      "epoch": 4.75,
      "grad_norm": 0.364629864692688,
      "learning_rate": 0.00017742883068638446,
      "loss": 0.963,
      "step": 19000
    },
    {
      "epoch": 4.775,
      "grad_norm": 0.39363783597946167,
      "learning_rate": 0.00017508368075093582,
      "loss": 0.9175,
      "step": 19100
    },
    {
      "epoch": 4.8,
      "grad_norm": 0.39291703701019287,
      "learning_rate": 0.00017274575140626317,
      "loss": 0.9616,
      "step": 19200
    },
    {
      "epoch": 4.825,
      "grad_norm": 0.3810083866119385,
      "learning_rate": 0.0001704152679868626,
      "loss": 0.9754,
      "step": 19300
    },
    {
      "epoch": 4.85,
      "grad_norm": 0.36352667212486267,
      "learning_rate": 0.00016809245510957666,
      "loss": 0.953,
      "step": 19400
    },
    {
      "epoch": 4.875,
      "grad_norm": 0.31296002864837646,
      "learning_rate": 0.000165777536651945,
      "loss": 0.9478,
      "step": 19500
    },
    {
      "epoch": 4.9,
      "grad_norm": 0.4004424214363098,
      "learning_rate": 0.0001634707357306267,
      "loss": 0.9508,
      "step": 19600
    },
    {
      "epoch": 4.925,
      "grad_norm": 0.3690031170845032,
      "learning_rate": 0.00016117227467989602,
      "loss": 0.9456,
      "step": 19700
    },
    {
      "epoch": 4.95,
      "grad_norm": 0.3946553170681,
      "learning_rate": 0.0001588823750302126,
      "loss": 0.95,
      "step": 19800
    },
    {
      "epoch": 4.975,
      "grad_norm": 0.39817288517951965,
      "learning_rate": 0.00015660125748687094,
      "loss": 0.9431,
      "step": 19900
    },
    {
      "epoch": 5.0,
      "grad_norm": 0.37246620655059814,
      "learning_rate": 0.00015432914190872756,
      "loss": 0.9353,
      "step": 20000
    },
    {
      "epoch": 5.0,
      "eval_loss": 0.9754775762557983,
      "eval_runtime": 152.4185,
      "eval_samples_per_second": 26.244,
      "eval_steps_per_second": 6.561,
      "step": 20000
    },
    {
      "epoch": 5.025,
      "grad_norm": 0.36520034074783325,
      "learning_rate": 0.000152066247287011,
      "loss": 0.9377,
      "step": 20100
    },
    {
      "epoch": 5.05,
      "grad_norm": 0.3535405397415161,
      "learning_rate": 0.00014981279172421482,
      "loss": 0.9414,
      "step": 20200
    },
    {
      "epoch": 5.075,
      "grad_norm": 0.35102036595344543,
      "learning_rate": 0.00014756899241307614,
      "loss": 0.9164,
      "step": 20300
    },
    {
      "epoch": 5.1,
      "grad_norm": 0.3539367616176605,
      "learning_rate": 0.00014533506561564306,
      "loss": 0.9187,
      "step": 20400
    },
    {
      "epoch": 5.125,
      "grad_norm": 0.347476601600647,
      "learning_rate": 0.00014311122664242953,
      "loss": 0.9196,
      "step": 20500
    },
    {
      "epoch": 5.15,
      "grad_norm": 0.3902195990085602,
      "learning_rate": 0.00014089768983166444,
      "loss": 0.9301,
      "step": 20600
    },
    {
      "epoch": 5.175,
      "grad_norm": 0.3888334631919861,
      "learning_rate": 0.000138694668528633,
      "loss": 0.9241,
      "step": 20700
    },
    {
      "epoch": 5.2,
      "grad_norm": 0.41042113304138184,
      "learning_rate": 0.00013650237506511331,
      "loss": 0.9504,
      "step": 20800
    },
    {
      "epoch": 5.225,
      "grad_norm": 0.3549586236476898,
      "learning_rate": 0.0001343210207389125,
      "loss": 0.955,
      "step": 20900
    },
    {
      "epoch": 5.25,
      "grad_norm": 0.34020912647247314,
      "learning_rate": 0.00013215081579350058,
      "loss": 0.9471,
      "step": 21000
    },
    {
      "epoch": 5.275,
      "grad_norm": 0.33093366026878357,
      "learning_rate": 0.00012999196939774722,
      "loss": 0.9351,
      "step": 21100
    },
    {
      "epoch": 5.3,
      "grad_norm": 0.4015860855579376,
      "learning_rate": 0.00012784468962576134,
      "loss": 0.9491,
      "step": 21200
    },
    {
      "epoch": 5.325,
      "grad_norm": 0.37603822350502014,
      "learning_rate": 0.00012570918343683636,
      "loss": 0.958,
      "step": 21300
    },
    {
      "epoch": 5.35,
      "grad_norm": 0.39434927701950073,
      "learning_rate": 0.0001235856566555039,
      "loss": 0.94,
      "step": 21400
    },
    {
      "epoch": 5.375,
      "grad_norm": 0.43162479996681213,
      "learning_rate": 0.0001214743139516946,
      "loss": 0.9291,
      "step": 21500
    },
    {
      "epoch": 5.4,
      "grad_norm": 0.38297414779663086,
      "learning_rate": 0.00011937535882101281,
      "loss": 0.9494,
      "step": 21600
    },
    {
      "epoch": 5.425,
      "grad_norm": 0.40226617455482483,
      "learning_rate": 0.00011728899356512265,
      "loss": 0.9067,
      "step": 21700
    },
    {
      "epoch": 5.45,
      "grad_norm": 0.38879096508026123,
      "learning_rate": 0.00011521541927224994,
      "loss": 0.949,
      "step": 21800
    },
    {
      "epoch": 5.475,
      "grad_norm": 0.33627960085868835,
      "learning_rate": 0.00011315483579780094,
      "loss": 0.9358,
      "step": 21900
    },
    {
      "epoch": 5.5,
      "grad_norm": 0.4547683596611023,
      "learning_rate": 0.00011110744174509952,
      "loss": 0.9179,
      "step": 22000
    },
    {
      "epoch": 5.525,
      "grad_norm": 0.3883083462715149,
      "learning_rate": 0.00010907343444624579,
      "loss": 0.9662,
      "step": 22100
    },
    {
      "epoch": 5.55,
      "grad_norm": 0.4168870449066162,
      "learning_rate": 0.00010705300994309697,
      "loss": 0.9286,
      "step": 22200
    },
    {
      "epoch": 5.575,
      "grad_norm": 0.3810732066631317,
      "learning_rate": 0.00010504636296837161,
      "loss": 0.9443,
      "step": 22300
    },
    {
      "epoch": 5.6,
      "grad_norm": 0.40432223677635193,
      "learning_rate": 0.00010305368692688174,
      "loss": 0.9138,
      "step": 22400
    },
    {
      "epoch": 5.625,
      "grad_norm": 0.3702322542667389,
      "learning_rate": 0.00010107517387689166,
      "loss": 0.9474,
      "step": 22500
    },
    {
      "epoch": 5.65,
      "grad_norm": 0.40569931268692017,
      "learning_rate": 9.911101451160715e-05,
      "loss": 0.9645,
      "step": 22600
    },
    {
      "epoch": 5.675,
      "grad_norm": 0.39476191997528076,
      "learning_rate": 9.716139814079594e-05,
      "loss": 0.9478,
      "step": 22700
    },
    {
      "epoch": 5.7,
      "grad_norm": 0.347662091255188,
      "learning_rate": 9.522651267254148e-05,
      "loss": 0.9627,
      "step": 22800
    },
    {
      "epoch": 5.725,
      "grad_norm": 0.36813852190971375,
      "learning_rate": 9.330654459513265e-05,
      "loss": 0.9328,
      "step": 22900
    },
    {
      "epoch": 5.75,
      "grad_norm": 0.32601600885391235,
      "learning_rate": 9.140167895908866e-05,
      "loss": 0.9547,
      "step": 23000
    },
    {
      "epoch": 5.775,
      "grad_norm": 0.40840694308280945,
      "learning_rate": 8.951209935932425e-05,
      "loss": 0.9113,
      "step": 23100
    },
    {
      "epoch": 5.8,
      "grad_norm": 0.38472625613212585,
      "learning_rate": 8.763798791745412e-05,
      "loss": 0.9667,
      "step": 23200
    },
    {
      "epoch": 5.825,
      "grad_norm": 0.37892642617225647,
      "learning_rate": 8.577952526423969e-05,
      "loss": 0.9241,
      "step": 23300
    },
    {
      "epoch": 5.85,
      "grad_norm": 0.3874419927597046,
      "learning_rate": 8.393689052217964e-05,
      "loss": 0.9539,
      "step": 23400
    },
    {
      "epoch": 5.875,
      "grad_norm": 0.37583285570144653,
      "learning_rate": 8.211026128824539e-05,
      "loss": 0.9411,
      "step": 23500
    },
    {
      "epoch": 5.9,
      "grad_norm": 0.37817347049713135,
      "learning_rate": 8.029981361676455e-05,
      "loss": 0.9426,
      "step": 23600
    },
    {
      "epoch": 5.925,
      "grad_norm": 0.32163432240486145,
      "learning_rate": 7.850572200245185e-05,
      "loss": 0.9512,
      "step": 23700
    },
    {
      "epoch": 5.95,
      "grad_norm": 0.37549078464508057,
      "learning_rate": 7.672815936359106e-05,
      "loss": 0.9694,
      "step": 23800
    },
    {
      "epoch": 5.975,
      "grad_norm": 0.43039214611053467,
      "learning_rate": 7.496729702536912e-05,
      "loss": 0.9191,
      "step": 23900
    },
    {
      "epoch": 6.0,
      "grad_norm": 0.3925749361515045,
      "learning_rate": 7.322330470336314e-05,
      "loss": 0.9121,
      "step": 24000
    },
    {
      "epoch": 6.0,
      "eval_loss": 0.9720398187637329,
      "eval_runtime": 152.9557,
      "eval_samples_per_second": 26.151,
      "eval_steps_per_second": 6.538,
      "step": 24000
    },
    {
      "epoch": 6.025,
      "grad_norm": 0.41935107111930847,
      "learning_rate": 7.149635048718294e-05,
      "loss": 0.9072,
      "step": 24100
    },
    {
      "epoch": 6.05,
      "grad_norm": 0.4102177321910858,
      "learning_rate": 6.97866008242703e-05,
      "loss": 0.9105,
      "step": 24200
    },
    {
      "epoch": 6.075,
      "grad_norm": 0.3717847168445587,
      "learning_rate": 6.809422050385628e-05,
      "loss": 0.9399,
      "step": 24300
    },
    {
      "epoch": 6.1,
      "grad_norm": 0.3842828869819641,
      "learning_rate": 6.641937264107867e-05,
      "loss": 0.8992,
      "step": 24400
    },
    {
      "epoch": 6.125,
      "grad_norm": 0.4051499366760254,
      "learning_rate": 6.476221866126028e-05,
      "loss": 0.9274,
      "step": 24500
    },
    {
      "epoch": 6.15,
      "grad_norm": 0.4197865128517151,
      "learning_rate": 6.312291828435076e-05,
      "loss": 0.9646,
      "step": 24600
    },
    {
      "epoch": 6.175,
      "grad_norm": 0.41968774795532227,
      "learning_rate": 6.150162950953264e-05,
      "loss": 0.9412,
      "step": 24700
    },
    {
      "epoch": 6.2,
      "grad_norm": 0.40155336260795593,
      "learning_rate": 5.989850859999227e-05,
      "loss": 0.9627,
      "step": 24800
    },
    {
      "epoch": 6.225,
      "grad_norm": 0.39962807297706604,
      "learning_rate": 5.831371006785963e-05,
      "loss": 0.9337,
      "step": 24900
    },
    {
      "epoch": 6.25,
      "grad_norm": 0.4056980609893799,
      "learning_rate": 5.6747386659315755e-05,
      "loss": 0.9221,
      "step": 25000
    },
    {
      "epoch": 6.275,
      "grad_norm": 0.4260208308696747,
      "learning_rate": 5.519968933987082e-05,
      "loss": 0.9359,
      "step": 25100
    },
    {
      "epoch": 6.3,
      "grad_norm": 0.39483314752578735,
      "learning_rate": 5.367076727981382e-05,
      "loss": 0.9187,
      "step": 25200
    },
    {
      "epoch": 6.325,
      "grad_norm": 0.43257468938827515,
      "learning_rate": 5.216076783983492e-05,
      "loss": 0.9701,
      "step": 25300
    },
    {
      "epoch": 6.35,
      "grad_norm": 0.39114078879356384,
      "learning_rate": 5.066983655682325e-05,
      "loss": 0.9664,
      "step": 25400
    },
    {
      "epoch": 6.375,
      "grad_norm": 0.39986172318458557,
      "learning_rate": 4.919811712983879e-05,
      "loss": 0.9345,
      "step": 25500
    },
    {
      "epoch": 6.4,
      "grad_norm": 0.41802579164505005,
      "learning_rate": 4.7745751406263163e-05,
      "loss": 0.9192,
      "step": 25600
    },
    {
      "epoch": 6.425,
      "grad_norm": 0.34528008103370667,
      "learning_rate": 4.6312879368127645e-05,
      "loss": 0.953,
      "step": 25700
    },
    {
      "epoch": 6.45,
      "grad_norm": 0.418544739484787,
      "learning_rate": 4.4899639118621604e-05,
      "loss": 0.9121,
      "step": 25800
    },
    {
      "epoch": 6.475,
      "grad_norm": 0.40490132570266724,
      "learning_rate": 4.350616686878175e-05,
      "loss": 0.9199,
      "step": 25900
    },
    {
      "epoch": 6.5,
      "grad_norm": 0.4175280034542084,
      "learning_rate": 4.213259692436367e-05,
      "loss": 0.9304,
      "step": 26000
    },
    {
      "epoch": 6.525,
      "grad_norm": 0.3687520921230316,
      "learning_rate": 4.077906167289766e-05,
      "loss": 0.953,
      "step": 26100
    },
    {
      "epoch": 6.55,
      "grad_norm": 0.389283686876297,
      "learning_rate": 3.944569157092839e-05,
      "loss": 0.9278,
      "step": 26200
    },
    {
      "epoch": 6.575,
      "grad_norm": 0.353508323431015,
      "learning_rate": 3.8132615131441396e-05,
      "loss": 0.9488,
      "step": 26300
    },
    {
      "epoch": 6.6,
      "grad_norm": 0.3994731605052948,
      "learning_rate": 3.6839958911476953e-05,
      "loss": 0.8955,
      "step": 26400
    },
    {
      "epoch": 6.625,
      "grad_norm": 0.37398815155029297,
      "learning_rate": 3.5567847499932e-05,
      "loss": 0.9453,
      "step": 26500
    },
    {
      "epoch": 6.65,
      "grad_norm": 0.37087592482566833,
      "learning_rate": 3.431640350555204e-05,
      "loss": 0.9449,
      "step": 26600
    },
    {
      "epoch": 6.675,
      "grad_norm": 0.3406459093093872,
      "learning_rate": 3.308574754511404e-05,
      "loss": 0.9143,
      "step": 26700
    },
    {
      "epoch": 6.7,
      "grad_norm": 0.36757832765579224,
      "learning_rate": 3.187599823180071e-05,
      "loss": 0.935,
      "step": 26800
    },
    {
      "epoch": 6.725,
      "grad_norm": 0.40823671221733093,
      "learning_rate": 3.0687272163768986e-05,
      "loss": 0.9181,
      "step": 26900
    },
    {
      "epoch": 6.75,
      "grad_norm": 0.3942839801311493,
      "learning_rate": 2.9519683912911265e-05,
      "loss": 0.9219,
      "step": 27000
    },
    {
      "epoch": 6.775,
      "grad_norm": 0.3952127695083618,
      "learning_rate": 2.8373346013813417e-05,
      "loss": 0.9001,
      "step": 27100
    },
    {
      "epoch": 6.8,
      "grad_norm": 0.4122380018234253,
      "learning_rate": 2.7248368952908055e-05,
      "loss": 0.9266,
      "step": 27200
    },
    {
      "epoch": 6.825,
      "grad_norm": 0.4118337631225586,
      "learning_rate": 2.6144861157825773e-05,
      "loss": 0.9364,
      "step": 27300
    },
    {
      "epoch": 6.85,
      "grad_norm": 0.39552587270736694,
      "learning_rate": 2.5062928986944677e-05,
      "loss": 0.9226,
      "step": 27400
    },
    {
      "epoch": 6.875,
      "grad_norm": 0.4226832687854767,
      "learning_rate": 2.4002676719139166e-05,
      "loss": 0.9394,
      "step": 27500
    },
    {
      "epoch": 6.9,
      "grad_norm": 0.34474310278892517,
      "learning_rate": 2.296420654372966e-05,
      "loss": 0.9255,
      "step": 27600
    },
    {
      "epoch": 6.925,
      "grad_norm": 0.4371466636657715,
      "learning_rate": 2.1947618550633096e-05,
      "loss": 0.9298,
      "step": 27700
    },
    {
      "epoch": 6.95,
      "grad_norm": 0.4318234622478485,
      "learning_rate": 2.0953010720716037e-05,
      "loss": 0.9533,
      "step": 27800
    },
    {
      "epoch": 6.975,
      "grad_norm": 0.38867416977882385,
      "learning_rate": 1.9980478916351297e-05,
      "loss": 0.9612,
      "step": 27900
    },
    {
      "epoch": 7.0,
      "grad_norm": 0.38006094098091125,
      "learning_rate": 1.9030116872178316e-05,
      "loss": 0.9175,
      "step": 28000
    },
    {
      "epoch": 7.0,
      "eval_loss": 0.9707372784614563,
      "eval_runtime": 152.8648,
      "eval_samples_per_second": 26.167,
      "eval_steps_per_second": 6.542,
      "step": 28000
    },
    {
      "epoch": 7.025,
      "grad_norm": 0.4155292510986328,
      "learning_rate": 1.8102016186068992e-05,
      "loss": 0.936,
      "step": 28100
    },
    {
      "epoch": 7.05,
      "grad_norm": 0.3884825110435486,
      "learning_rate": 1.719626631029911e-05,
      "loss": 0.9398,
      "step": 28200
    },
    {
      "epoch": 7.075,
      "grad_norm": 0.39031410217285156,
      "learning_rate": 1.6312954542926888e-05,
      "loss": 0.9056,
      "step": 28300
    },
    {
      "epoch": 7.1,
      "grad_norm": 0.4356350004673004,
      "learning_rate": 1.5452166019378987e-05,
      "loss": 0.9178,
      "step": 28400
    },
    {
      "epoch": 7.125,
      "grad_norm": 0.3904324769973755,
      "learning_rate": 1.4613983704244827e-05,
      "loss": 0.9067,
      "step": 28500
    },
    {
      "epoch": 7.15,
      "grad_norm": 0.3213234543800354,
      "learning_rate": 1.3798488383280488e-05,
      "loss": 0.9436,
      "step": 28600
    },
    {
      "epoch": 7.175,
      "grad_norm": 0.38800540566444397,
      "learning_rate": 1.3005758655622424e-05,
      "loss": 0.9131,
      "step": 28700
    },
    {
      "epoch": 7.2,
      "grad_norm": 0.3734656870365143,
      "learning_rate": 1.2235870926211617e-05,
      "loss": 0.9466,
      "step": 28800
    },
    {
      "epoch": 7.225,
      "grad_norm": 0.4088476002216339,
      "learning_rate": 1.1488899398429897e-05,
      "loss": 0.9444,
      "step": 28900
    },
    {
      "epoch": 7.25,
      "grad_norm": 0.3788207471370697,
      "learning_rate": 1.0764916066947795e-05,
      "loss": 0.9015,
      "step": 29000
    },
    {
      "epoch": 7.275,
      "grad_norm": 0.3710539638996124,
      "learning_rate": 1.0063990710785648e-05,
      "loss": 0.9356,
      "step": 29100
    },
    {
      "epoch": 7.3,
      "grad_norm": 0.39175015687942505,
      "learning_rate": 9.386190886588208e-06,
      "loss": 0.9277,
      "step": 29200
    },
    {
      "epoch": 7.325,
      "grad_norm": 0.39857038855552673,
      "learning_rate": 8.731581922113152e-06,
      "loss": 0.9597,
      "step": 29300
    },
    {
      "epoch": 7.35,
      "grad_norm": 0.42670151591300964,
      "learning_rate": 8.10022690993506e-06,
      "loss": 0.9124,
      "step": 29400
    },
    {
      "epoch": 7.375,
      "grad_norm": 0.37594255805015564,
      "learning_rate": 7.4921867013640064e-06,
      "loss": 0.9202,
      "step": 29500
    },
    {
      "epoch": 7.4,
      "grad_norm": 0.34726592898368835,
      "learning_rate": 6.907519900580861e-06,
      "loss": 0.9462,
      "step": 29600
    },
    {
      "epoch": 7.425,
      "grad_norm": 0.3583536744117737,
      "learning_rate": 6.34628285898875e-06,
      "loss": 0.9356,
      "step": 29700
    },
    {
      "epoch": 7.45,
      "grad_norm": 0.40716230869293213,
      "learning_rate": 5.808529669781903e-06,
      "loss": 0.9204,
      "step": 29800
    },
    {
      "epoch": 7.475,
      "grad_norm": 0.4574654698371887,
      "learning_rate": 5.294312162731935e-06,
      "loss": 0.9272,
      "step": 29900
    },
    {
      "epoch": 7.5,
      "grad_norm": 0.41072556376457214,
      "learning_rate": 4.803679899192393e-06,
      "loss": 0.9181,
      "step": 30000
    },
    {
      "epoch": 7.525,
      "grad_norm": 0.4989704191684723,
      "learning_rate": 4.336680167322055e-06,
      "loss": 0.9346,
      "step": 30100
    },
    {
      "epoch": 7.55,
      "grad_norm": 0.4089345932006836,
      "learning_rate": 3.893357977527101e-06,
      "loss": 0.9392,
      "step": 30200
    },
    {
      "epoch": 7.575,
      "grad_norm": 0.4189968407154083,
      "learning_rate": 3.4737560581228343e-06,
      "loss": 0.9338,
      "step": 30300
    },
    {
      "epoch": 7.6,
      "grad_norm": 0.40372830629348755,
      "learning_rate": 3.077914851215585e-06,
      "loss": 0.9394,
      "step": 30400
    },
    {
      "epoch": 7.625,
      "grad_norm": 0.3864797055721283,
      "learning_rate": 2.7058725088047465e-06,
      "loss": 0.9283,
      "step": 30500
    },
    {
      "epoch": 7.65,
      "grad_norm": 0.4125591814517975,
      "learning_rate": 2.357664889105687e-06,
      "loss": 0.9731,
      "step": 30600
    },
    {
      "epoch": 7.675,
      "grad_norm": 0.4122030436992645,
      "learning_rate": 2.0333255530934903e-06,
      "loss": 0.9257,
      "step": 30700
    },
    {
      "epoch": 7.7,
      "grad_norm": 0.40973401069641113,
      "learning_rate": 1.7328857612684267e-06,
      "loss": 0.8964,
      "step": 30800
    },
    {
      "epoch": 7.725,
      "grad_norm": 0.36363255977630615,
      "learning_rate": 1.4563744706429517e-06,
      "loss": 0.9012,
      "step": 30900
    },
    {
      "epoch": 7.75,
      "grad_norm": 0.39668798446655273,
      "learning_rate": 1.2038183319507957e-06,
      "loss": 0.9399,
      "step": 31000
    },
    {
      "epoch": 7.775,
      "grad_norm": 0.40102848410606384,
      "learning_rate": 9.752416870782156e-07,
      "loss": 0.9471,
      "step": 31100
    },
    {
      "epoch": 7.8,
      "grad_norm": 0.43242618441581726,
      "learning_rate": 7.70666566718009e-07,
      "loss": 0.9289,
      "step": 31200
    },
    {
      "epoch": 7.825,
      "grad_norm": 0.41685134172439575,
      "learning_rate": 5.90112688246075e-07,
      "loss": 0.9365,
      "step": 31300
    },
    {
      "epoch": 7.85,
      "grad_norm": 0.36857473850250244,
      "learning_rate": 4.335974538210441e-07,
      "loss": 0.9324,
      "step": 31400
    },
    {
      "epoch": 7.875,
      "grad_norm": 0.42975786328315735,
      "learning_rate": 3.0113594870689873e-07,
      "loss": 0.9379,
      "step": 31500
    },
    {
      "epoch": 7.9,
      "grad_norm": 0.42332813143730164,
      "learning_rate": 1.9274093981927476e-07,
      "loss": 0.9453,
      "step": 31600
    },
    {
      "epoch": 7.925,
      "grad_norm": 0.41806164383888245,
      "learning_rate": 1.0842287449469579e-07,
      "loss": 0.9055,
      "step": 31700
    },
    {
      "epoch": 7.95,
      "grad_norm": 0.35621610283851624,
      "learning_rate": 4.818987948379538e-08,
      "loss": 0.8726,
      "step": 31800
    },
    {
      "epoch": 7.975,
      "grad_norm": 0.43373915553092957,
      "learning_rate": 1.2047760167999133e-08,
      "loss": 0.946,
      "step": 31900
    },
    {
      "epoch": 8.0,
      "grad_norm": 0.4070851504802704,
      "learning_rate": 0.0,
      "loss": 0.9197,
      "step": 32000
    }
  ],
  "logging_steps": 100,
  "max_steps": 32000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 8,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4.2123286020096e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}