{
  "best_metric": 2.6527161598205566,
  "best_model_checkpoint": "./results/checkpoint-2000",
  "epoch": 0.9691096305269533,
  "eval_steps": 250,
  "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04845548152634767,
      "grad_norm": 0.2021484375,
      "learning_rate": 0.0019926230341909047,
      "loss": 5.5548,
      "step": 100
    },
    {
      "epoch": 0.09691096305269534,
      "grad_norm": 0.484375,
      "learning_rate": 0.0019623193935821215,
      "loss": 4.0801,
      "step": 200
    },
    {
      "epoch": 0.12113870381586916,
      "eval_loss": 3.651378631591797,
      "eval_runtime": 1225.4443,
      "eval_samples_per_second": 47.902,
      "eval_steps_per_second": 1.497,
      "step": 250
    },
    {
      "epoch": 0.145366444579043,
      "grad_norm": 0.6796875,
      "learning_rate": 0.0019092830690827923,
      "loss": 3.6511,
      "step": 300
    },
    {
      "epoch": 0.19382192610539067,
      "grad_norm": 0.474609375,
      "learning_rate": 0.0018347669274724923,
      "loss": 3.4945,
      "step": 400
    },
    {
      "epoch": 0.24227740763173833,
      "grad_norm": 0.51171875,
      "learning_rate": 0.0017405312490731885,
      "loss": 3.3662,
      "step": 500
    },
    {
      "epoch": 0.24227740763173833,
      "eval_loss": 3.2901480197906494,
      "eval_runtime": 1223.1792,
      "eval_samples_per_second": 47.991,
      "eval_steps_per_second": 1.5,
      "step": 500
    },
    {
      "epoch": 0.290732889158086,
      "grad_norm": 0.90234375,
      "learning_rate": 0.0016288021449895208,
      "loss": 3.2248,
      "step": 600
    },
    {
      "epoch": 0.3391883706844337,
      "grad_norm": 0.5859375,
      "learning_rate": 0.001502218970119089,
      "loss": 3.1488,
      "step": 700
    },
    {
      "epoch": 0.3634161114476075,
      "eval_loss": 3.054584503173828,
      "eval_runtime": 1225.6755,
      "eval_samples_per_second": 47.893,
      "eval_steps_per_second": 1.497,
      "step": 750
    },
    {
      "epoch": 0.38764385221078135,
      "grad_norm": 0.5546875,
      "learning_rate": 0.001363771974184993,
      "loss": 3.0595,
      "step": 800
    },
    {
      "epoch": 0.436099333737129,
      "grad_norm": 0.498046875,
      "learning_rate": 0.001216731663648138,
      "loss": 2.9756,
      "step": 900
    },
    {
      "epoch": 0.48455481526347666,
      "grad_norm": 0.423828125,
      "learning_rate": 0.0010645715431691336,
      "loss": 2.9173,
      "step": 1000
    },
    {
      "epoch": 0.48455481526347666,
      "eval_loss": 2.87882399559021,
      "eval_runtime": 1223.6,
      "eval_samples_per_second": 47.974,
      "eval_steps_per_second": 1.5,
      "step": 1000
    },
    {
      "epoch": 0.5330102967898244,
      "grad_norm": 0.431640625,
      "learning_rate": 0.0009108860616831929,
      "loss": 2.853,
      "step": 1100
    },
    {
      "epoch": 0.581465778316172,
      "grad_norm": 0.546875,
      "learning_rate": 0.0007593057014319566,
      "loss": 2.7923,
      "step": 1200
    },
    {
      "epoch": 0.6056935190793459,
      "eval_loss": 2.7545697689056396,
      "eval_runtime": 1225.0225,
      "eval_samples_per_second": 47.918,
      "eval_steps_per_second": 1.498,
      "step": 1250
    },
    {
      "epoch": 0.6299212598425197,
      "grad_norm": 0.392578125,
      "learning_rate": 0.0006134112157875245,
      "loss": 2.7647,
      "step": 1300
    },
    {
      "epoch": 0.6783767413688674,
      "grad_norm": 0.30078125,
      "learning_rate": 0.00047664904181190284,
      "loss": 2.7262,
      "step": 1400
    },
    {
      "epoch": 0.726832222895215,
      "grad_norm": 0.337890625,
      "learning_rate": 0.0003522498857445232,
      "loss": 2.692,
      "step": 1500
    },
    {
      "epoch": 0.726832222895215,
      "eval_loss": 2.6806631088256836,
      "eval_runtime": 1223.5768,
      "eval_samples_per_second": 47.975,
      "eval_steps_per_second": 1.5,
      "step": 1500
    },
    {
      "epoch": 0.7752877044215627,
      "grad_norm": 0.302734375,
      "learning_rate": 0.0002431524046570296,
      "loss": 2.6635,
      "step": 1600
    },
    {
      "epoch": 0.8237431859479104,
      "grad_norm": 0.33203125,
      "learning_rate": 0.00015193378712876149,
      "loss": 2.6561,
      "step": 1700
    },
    {
      "epoch": 0.8479709267110842,
      "eval_loss": 2.655076503753662,
      "eval_runtime": 1225.3417,
      "eval_samples_per_second": 47.906,
      "eval_steps_per_second": 1.498,
      "step": 1750
    },
    {
      "epoch": 0.872198667474258,
      "grad_norm": 0.298828125,
      "learning_rate": 8.074887282213439e-05,
      "loss": 2.6481,
      "step": 1800
    },
    {
      "epoch": 0.9206541490006057,
      "grad_norm": 0.26953125,
      "learning_rate": 3.127924912435132e-05,
      "loss": 2.6565,
      "step": 1900
    },
    {
      "epoch": 0.9691096305269533,
      "grad_norm": 0.416015625,
      "learning_rate": 4.693527335575154e-06,
      "loss": 2.6513,
      "step": 2000
    },
    {
      "epoch": 0.9691096305269533,
      "eval_loss": 2.6527161598205566,
      "eval_runtime": 1223.4873,
      "eval_samples_per_second": 47.978,
      "eval_steps_per_second": 1.5,
      "step": 2000
    }
  ],
  "logging_steps": 100,
  "max_steps": 2063,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 250,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 4.678589479911424e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}