{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.999089792017476,
  "eval_steps": 500,
  "global_step": 5492,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.18204159650480134,
      "grad_norm": 3.854499101638794,
      "learning_rate": 9.548269581056467e-05,
      "loss": 1.0014,
      "step": 250
    },
    {
      "epoch": 0.3640831930096027,
      "grad_norm": 3.5418713092803955,
      "learning_rate": 9.092896174863389e-05,
      "loss": 0.5401,
      "step": 500
    },
    {
      "epoch": 0.546124789514404,
      "grad_norm": 3.089492082595825,
      "learning_rate": 8.63752276867031e-05,
      "loss": 0.4797,
      "step": 750
    },
    {
      "epoch": 0.7281663860192054,
      "grad_norm": 3.0062525272369385,
      "learning_rate": 8.182149362477231e-05,
      "loss": 0.4598,
      "step": 1000
    },
    {
      "epoch": 0.9102079825240067,
      "grad_norm": 2.703545093536377,
      "learning_rate": 7.726775956284153e-05,
      "loss": 0.4454,
      "step": 1250
    },
    {
      "epoch": 1.092249579028808,
      "grad_norm": 2.665408134460449,
      "learning_rate": 7.271402550091076e-05,
      "loss": 0.4338,
      "step": 1500
    },
    {
      "epoch": 1.2742911755336095,
      "grad_norm": 2.8605291843414307,
      "learning_rate": 6.816029143897996e-05,
      "loss": 0.4224,
      "step": 1750
    },
    {
      "epoch": 1.4563327720384107,
      "grad_norm": 3.0206186771392822,
      "learning_rate": 6.360655737704918e-05,
      "loss": 0.4147,
      "step": 2000
    },
    {
      "epoch": 1.6383743685432122,
      "grad_norm": 3.633958578109741,
      "learning_rate": 5.90528233151184e-05,
      "loss": 0.3983,
      "step": 2250
    },
    {
      "epoch": 1.8204159650480136,
      "grad_norm": 3.535999059677124,
      "learning_rate": 5.449908925318762e-05,
      "loss": 0.3808,
      "step": 2500
    },
    {
      "epoch": 2.0024575615528146,
      "grad_norm": 3.919354200363159,
      "learning_rate": 4.994535519125683e-05,
      "loss": 0.3642,
      "step": 2750
    },
    {
      "epoch": 2.184499158057616,
      "grad_norm": 4.2734246253967285,
      "learning_rate": 4.539162112932605e-05,
      "loss": 0.3377,
      "step": 3000
    },
    {
      "epoch": 2.3665407545624175,
      "grad_norm": 4.275780200958252,
      "learning_rate": 4.083788706739526e-05,
      "loss": 0.3261,
      "step": 3250
    },
    {
      "epoch": 2.548582351067219,
      "grad_norm": 4.408421516418457,
      "learning_rate": 3.6284153005464486e-05,
      "loss": 0.31,
      "step": 3500
    },
    {
      "epoch": 2.7306239475720204,
      "grad_norm": 4.726068019866943,
      "learning_rate": 3.17304189435337e-05,
      "loss": 0.2998,
      "step": 3750
    },
    {
      "epoch": 2.9126655440768214,
      "grad_norm": 4.753103733062744,
      "learning_rate": 2.7176684881602916e-05,
      "loss": 0.2897,
      "step": 4000
    },
    {
      "epoch": 3.094707140581623,
      "grad_norm": 4.001144886016846,
      "learning_rate": 2.262295081967213e-05,
      "loss": 0.2744,
      "step": 4250
    },
    {
      "epoch": 3.2767487370864243,
      "grad_norm": 4.412220001220703,
      "learning_rate": 1.806921675774135e-05,
      "loss": 0.2619,
      "step": 4500
    },
    {
      "epoch": 3.458790333591226,
      "grad_norm": 4.553833961486816,
      "learning_rate": 1.3515482695810567e-05,
      "loss": 0.2578,
      "step": 4750
    },
    {
      "epoch": 3.640831930096027,
      "grad_norm": 4.323617458343506,
      "learning_rate": 8.961748633879782e-06,
      "loss": 0.2533,
      "step": 5000
    },
    {
      "epoch": 3.8228735266008282,
      "grad_norm": 4.197403430938721,
      "learning_rate": 4.408014571948998e-06,
      "loss": 0.2489,
      "step": 5250
    },
    {
      "epoch": 3.999089792017476,
      "step": 5492,
      "total_flos": 1.2115246643852867e+18,
      "train_loss": 0.38415815321442337,
      "train_runtime": 38374.2384,
      "train_samples_per_second": 13.742,
      "train_steps_per_second": 0.143
    }
  ],
  "logging_steps": 250,
  "max_steps": 5492,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.2115246643852867e+18,
  "train_batch_size": 6,
  "trial_name": null,
  "trial_params": null
}