Training in progress, step 10, checkpoint
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.04008016032064128,
  "eval_steps": 3,
  "global_step": 10,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.004008016032064128,
      "grad_norm": 2.0643436908721924,
      "learning_rate": 2e-05,
      "loss": 1.5167,
      "step": 1
    },
    {
      "epoch": 0.004008016032064128,
      "eval_loss": 1.9321434497833252,
      "eval_runtime": 2.327,
      "eval_samples_per_second": 45.122,
      "eval_steps_per_second": 22.776,
      "step": 1
    },
    {
      "epoch": 0.008016032064128256,
      "grad_norm": 2.7145376205444336,
      "learning_rate": 4e-05,
      "loss": 2.0837,
      "step": 2
    },
    {
      "epoch": 0.012024048096192385,
      "grad_norm": 2.351701259613037,
      "learning_rate": 6e-05,
      "loss": 2.0507,
      "step": 3
    },
    {
      "epoch": 0.012024048096192385,
      "eval_loss": 1.9191216230392456,
      "eval_runtime": 2.325,
      "eval_samples_per_second": 45.161,
      "eval_steps_per_second": 22.796,
      "step": 3
    },
    {
      "epoch": 0.01603206412825651,
      "grad_norm": 2.490091562271118,
      "learning_rate": 8e-05,
      "loss": 1.8264,
      "step": 4
    },
    {
      "epoch": 0.02004008016032064,
      "grad_norm": 2.168489456176758,
      "learning_rate": 0.0001,
      "loss": 1.966,
      "step": 5
    },
    {
      "epoch": 0.02404809619238477,
      "grad_norm": 2.344045639038086,
      "learning_rate": 0.00012,
      "loss": 1.7798,
      "step": 6
    },
    {
      "epoch": 0.02404809619238477,
      "eval_loss": 1.7123405933380127,
      "eval_runtime": 2.3355,
      "eval_samples_per_second": 44.958,
      "eval_steps_per_second": 22.693,
      "step": 6
    },
    {
      "epoch": 0.028056112224448898,
      "grad_norm": 3.1197409629821777,
      "learning_rate": 0.00014,
      "loss": 1.9776,
      "step": 7
    },
    {
      "epoch": 0.03206412825651302,
      "grad_norm": 2.1753883361816406,
      "learning_rate": 0.00016,
      "loss": 1.7457,
      "step": 8
    },
    {
      "epoch": 0.036072144288577156,
      "grad_norm": 2.092257022857666,
      "learning_rate": 0.00018,
      "loss": 1.2922,
      "step": 9
    },
    {
      "epoch": 0.036072144288577156,
      "eval_loss": 1.3578931093215942,
      "eval_runtime": 2.3295,
      "eval_samples_per_second": 45.073,
      "eval_steps_per_second": 22.751,
      "step": 9
    },
    {
      "epoch": 0.04008016032064128,
      "grad_norm": 2.7192161083221436,
      "learning_rate": 0.0002,
      "loss": 1.3041,
      "step": 10
    }
  ],
  "logging_steps": 1,
  "max_steps": 10,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 3,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 673031965900800.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
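
This is the trainer state that the Hugging Face Transformers Trainer writes alongside each checkpoint: `log_history` interleaves per-step training logs (loss, grad_norm, learning_rate) with evaluation records (eval_loss and throughput), while the top-level fields record where the run stopped. A minimal sketch of reading it back for inspection, assuming the file sits at `checkpoint-10/trainer_state.json` (the path is an assumption for illustration):

```python
# Inspect a saved trainer_state.json: print progress, then separate
# training-step logs from evaluation logs in log_history.
import json
from pathlib import Path

state_path = Path("checkpoint-10/trainer_state.json")  # hypothetical local path
with state_path.open() as f:
    state = json.load(f)

print(f"global_step={state['global_step']}  epoch={state['epoch']:.4f}")

# Training entries carry "loss"; evaluation entries carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

for e in train_logs:
    print(f"step {e['step']:>2}: loss={e['loss']:.4f}  lr={e['learning_rate']:.2e}")

for e in eval_logs:
    print(f"step {e['step']:>2}: eval_loss={e['eval_loss']:.4f}")
```

If the rest of the checkpoint directory is intact, this state is what `trainer.train(resume_from_checkpoint="checkpoint-10")` would restore. Note that `global_step` already equals `max_steps` (10) and `should_training_stop` is `true`, so resuming as-is would end immediately unless the step budget is raised.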