{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.04067314047485891, "eval_steps": 13, "global_step": 50, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.0008134628094971783, "grad_norm": 0.9108312726020813, "learning_rate": 1e-05, "loss": 1.4242, "step": 1 }, { "epoch": 0.0008134628094971783, "eval_loss": 1.6578611135482788, "eval_runtime": 195.6942, "eval_samples_per_second": 10.583, "eval_steps_per_second": 5.294, "step": 1 }, { "epoch": 0.0016269256189943566, "grad_norm": 0.8185848593711853, "learning_rate": 2e-05, "loss": 1.4684, "step": 2 }, { "epoch": 0.0024403884284915347, "grad_norm": 0.8753907680511475, "learning_rate": 3e-05, "loss": 1.4355, "step": 3 }, { "epoch": 0.0032538512379887133, "grad_norm": 0.8084831237792969, "learning_rate": 4e-05, "loss": 1.4623, "step": 4 }, { "epoch": 0.004067314047485892, "grad_norm": 0.776229202747345, "learning_rate": 5e-05, "loss": 1.4744, "step": 5 }, { "epoch": 0.0048807768569830694, "grad_norm": 0.7320299744606018, "learning_rate": 6e-05, "loss": 1.5194, "step": 6 }, { "epoch": 0.005694239666480248, "grad_norm": 0.8332337141036987, "learning_rate": 7e-05, "loss": 1.4006, "step": 7 }, { "epoch": 0.0065077024759774265, "grad_norm": 0.9180299639701843, "learning_rate": 8e-05, "loss": 1.4191, "step": 8 }, { "epoch": 0.007321165285474605, "grad_norm": 0.8599632382392883, "learning_rate": 9e-05, "loss": 1.3284, "step": 9 }, { "epoch": 0.008134628094971784, "grad_norm": 1.0169434547424316, "learning_rate": 0.0001, "loss": 1.3026, "step": 10 }, { "epoch": 0.008948090904468962, "grad_norm": 1.1758923530578613, "learning_rate": 9.98458666866564e-05, "loss": 1.3743, "step": 11 }, { "epoch": 0.009761553713966139, "grad_norm": 1.301285982131958, "learning_rate": 9.938441702975689e-05, "loss": 1.3781, "step": 12 }, { "epoch": 0.010575016523463317, "grad_norm": 1.1804258823394775, "learning_rate": 9.861849601988383e-05, "loss": 1.4496, "step": 13 }, { "epoch": 0.010575016523463317, "eval_loss": 1.3726698160171509, "eval_runtime": 36.4237, "eval_samples_per_second": 56.859, "eval_steps_per_second": 28.443, "step": 13 }, { "epoch": 0.011388479332960496, "grad_norm": 1.139792799949646, "learning_rate": 9.755282581475769e-05, "loss": 1.2662, "step": 14 }, { "epoch": 0.012201942142457674, "grad_norm": 1.2657827138900757, "learning_rate": 9.619397662556435e-05, "loss": 1.2368, "step": 15 }, { "epoch": 0.013015404951954853, "grad_norm": 0.998916745185852, "learning_rate": 9.45503262094184e-05, "loss": 1.1436, "step": 16 }, { "epoch": 0.013828867761452032, "grad_norm": 1.0009139776229858, "learning_rate": 9.263200821770461e-05, "loss": 1.239, "step": 17 }, { "epoch": 0.01464233057094921, "grad_norm": 1.1696265935897827, "learning_rate": 9.045084971874738e-05, "loss": 1.2521, "step": 18 }, { "epoch": 0.015455793380446387, "grad_norm": 0.9146084189414978, "learning_rate": 8.802029828000156e-05, "loss": 1.1897, "step": 19 }, { "epoch": 0.016269256189943567, "grad_norm": 0.9100729823112488, "learning_rate": 8.535533905932738e-05, "loss": 1.084, "step": 20 }, { "epoch": 0.017082718999440746, "grad_norm": 0.8398633599281311, "learning_rate": 8.247240241650918e-05, "loss": 1.2046, "step": 21 }, { "epoch": 0.017896181808937924, "grad_norm": 0.8264802694320679, "learning_rate": 7.938926261462366e-05, "loss": 1.2017, "step": 22 }, { "epoch": 0.0187096446184351, "grad_norm": 0.8314357399940491, "learning_rate": 7.612492823579745e-05, "loss": 1.2994, "step": 23 }, { "epoch": 
0.019523107427932278, "grad_norm": 0.9849143028259277, "learning_rate": 7.269952498697734e-05, "loss": 1.363, "step": 24 }, { "epoch": 0.020336570237429456, "grad_norm": 1.168421983718872, "learning_rate": 6.91341716182545e-05, "loss": 1.2528, "step": 25 }, { "epoch": 0.021150033046926635, "grad_norm": 1.078614354133606, "learning_rate": 6.545084971874738e-05, "loss": 1.3879, "step": 26 }, { "epoch": 0.021150033046926635, "eval_loss": 1.2557233572006226, "eval_runtime": 36.435, "eval_samples_per_second": 56.841, "eval_steps_per_second": 28.434, "step": 26 }, { "epoch": 0.021963495856423813, "grad_norm": 0.7654521465301514, "learning_rate": 6.167226819279528e-05, "loss": 1.1113, "step": 27 }, { "epoch": 0.022776958665920992, "grad_norm": 0.7602103352546692, "learning_rate": 5.782172325201155e-05, "loss": 1.2211, "step": 28 }, { "epoch": 0.02359042147541817, "grad_norm": 0.8696973323822021, "learning_rate": 5.392295478639225e-05, "loss": 1.1937, "step": 29 }, { "epoch": 0.02440388428491535, "grad_norm": 0.6972202062606812, "learning_rate": 5e-05, "loss": 1.1481, "step": 30 }, { "epoch": 0.025217347094412527, "grad_norm": 0.8415080308914185, "learning_rate": 4.607704521360776e-05, "loss": 1.3055, "step": 31 }, { "epoch": 0.026030809903909706, "grad_norm": 0.6529223322868347, "learning_rate": 4.2178276747988446e-05, "loss": 1.2149, "step": 32 }, { "epoch": 0.026844272713406885, "grad_norm": 0.8038480281829834, "learning_rate": 3.832773180720475e-05, "loss": 1.1512, "step": 33 }, { "epoch": 0.027657735522904063, "grad_norm": 0.7951949238777161, "learning_rate": 3.4549150281252636e-05, "loss": 1.2931, "step": 34 }, { "epoch": 0.02847119833240124, "grad_norm": 0.6698573231697083, "learning_rate": 3.086582838174551e-05, "loss": 1.1721, "step": 35 }, { "epoch": 0.02928466114189842, "grad_norm": 0.7140055298805237, "learning_rate": 2.7300475013022663e-05, "loss": 1.1752, "step": 36 }, { "epoch": 0.0300981239513956, "grad_norm": 0.6723005175590515, "learning_rate": 2.3875071764202563e-05, "loss": 1.2337, "step": 37 }, { "epoch": 0.030911586760892774, "grad_norm": 0.6740083694458008, "learning_rate": 2.061073738537635e-05, "loss": 1.2048, "step": 38 }, { "epoch": 0.031725049570389956, "grad_norm": 0.7262203097343445, "learning_rate": 1.7527597583490822e-05, "loss": 1.3243, "step": 39 }, { "epoch": 0.031725049570389956, "eval_loss": 1.2246425151824951, "eval_runtime": 36.4241, "eval_samples_per_second": 56.858, "eval_steps_per_second": 28.443, "step": 39 }, { "epoch": 0.032538512379887134, "grad_norm": 0.7718028426170349, "learning_rate": 1.4644660940672627e-05, "loss": 1.2117, "step": 40 }, { "epoch": 0.03335197518938431, "grad_norm": 0.7411465048789978, "learning_rate": 1.1979701719998453e-05, "loss": 1.1954, "step": 41 }, { "epoch": 0.03416543799888149, "grad_norm": 0.7557942867279053, "learning_rate": 9.549150281252633e-06, "loss": 1.1628, "step": 42 }, { "epoch": 0.03497890080837867, "grad_norm": 0.9376694560050964, "learning_rate": 7.367991782295391e-06, "loss": 1.2205, "step": 43 }, { "epoch": 0.03579236361787585, "grad_norm": 0.7513824105262756, "learning_rate": 5.449673790581611e-06, "loss": 1.2009, "step": 44 }, { "epoch": 0.03660582642737303, "grad_norm": 0.7382362484931946, "learning_rate": 3.8060233744356633e-06, "loss": 1.2458, "step": 45 }, { "epoch": 0.0374192892368702, "grad_norm": 0.7333554625511169, "learning_rate": 2.4471741852423237e-06, "loss": 1.1459, "step": 46 }, { "epoch": 0.03823275204636738, "grad_norm": 0.6778184175491333, "learning_rate": 1.3815039801161721e-06, "loss": 
1.1253, "step": 47 }, { "epoch": 0.039046214855864556, "grad_norm": 0.7480204105377197, "learning_rate": 6.15582970243117e-07, "loss": 1.2115, "step": 48 }, { "epoch": 0.039859677665361734, "grad_norm": 0.7647523283958435, "learning_rate": 1.5413331334360182e-07, "loss": 1.2324, "step": 49 }, { "epoch": 0.04067314047485891, "grad_norm": 0.8021819591522217, "learning_rate": 0.0, "loss": 1.2514, "step": 50 } ], "logging_steps": 1, "max_steps": 50, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 13, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 1.53307661205504e+16, "train_batch_size": 2, "trial_name": null, "trial_params": null }