{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.0003884136216657118,
"eval_steps": 3,
"global_step": 10,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 3.8841362166571184e-05,
"grad_norm": 1337.6612548828125,
"learning_rate": 2e-05,
"loss": 48.3903,
"step": 1
},
{
"epoch": 7.768272433314237e-05,
"grad_norm": 1237.1090087890625,
"learning_rate": 4e-05,
"loss": 82.2165,
"step": 2
},
{
"epoch": 0.00011652408649971354,
"grad_norm": 867.312744140625,
"learning_rate": 6e-05,
"loss": 31.9198,
"step": 3
},
{
"epoch": 0.00011652408649971354,
"eval_loss": 11.575413703918457,
"eval_runtime": 261.1845,
"eval_samples_per_second": 41.507,
"eval_steps_per_second": 20.755,
"step": 3
},
{
"epoch": 0.00015536544866628474,
"grad_norm": 1267.2216796875,
"learning_rate": 8e-05,
"loss": 43.4303,
"step": 4
},
{
"epoch": 0.0001942068108328559,
"grad_norm": 1062.40234375,
"learning_rate": 0.0001,
"loss": 33.3487,
"step": 5
},
{
"epoch": 0.0002330481729994271,
"grad_norm": 1249.7735595703125,
"learning_rate": 0.00012,
"loss": 41.8347,
"step": 6
},
{
"epoch": 0.0002330481729994271,
"eval_loss": 11.542579650878906,
"eval_runtime": 259.9141,
"eval_samples_per_second": 41.71,
"eval_steps_per_second": 20.857,
"step": 6
},
{
"epoch": 0.00027188953516599825,
"grad_norm": 1034.145263671875,
"learning_rate": 0.00014,
"loss": 48.4583,
"step": 7
},
{
"epoch": 0.00031073089733256947,
"grad_norm": 1013.7915649414062,
"learning_rate": 0.00016,
"loss": 32.965,
"step": 8
},
{
"epoch": 0.00034957225949914063,
"grad_norm": 1290.4112548828125,
"learning_rate": 0.00018,
"loss": 63.7696,
"step": 9
},
{
"epoch": 0.00034957225949914063,
"eval_loss": 11.457053184509277,
"eval_runtime": 264.0088,
"eval_samples_per_second": 41.063,
"eval_steps_per_second": 20.533,
"step": 9
},
{
"epoch": 0.0003884136216657118,
"grad_norm": 929.0032958984375,
"learning_rate": 0.0002,
"loss": 43.6878,
"step": 10
}
],
"logging_steps": 1,
"max_steps": 10,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 3,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 17996835717120.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
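The JSON above is the trainer state that the Hugging Face Trainer records for a 10-step run (logging every step, evaluating and saving every 3 steps). A minimal sketch of how one might inspect it offline, assuming the file is saved as `trainer_state.json` next to the checkpoint (the path and print format here are illustrative, not part of the checkpoint itself):

```python
import json

# Load the trainer state written alongside the checkpoint.
with open("trainer_state.json") as f:
    state = json.load(f)

# log_history mixes training-loss entries (with "loss") and
# evaluation entries (with "eval_loss"); split them apart.
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

for e in train_log:
    print(f"step {e['step']:>3}  loss {e['loss']:>8.4f}  lr {e['learning_rate']:.0e}")
for e in eval_log:
    print(f"step {e['step']:>3}  eval_loss {e['eval_loss']:.4f}")
```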