gemma-2-2b-it-lora-jap-en / trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.909090909090909,
  "eval_steps": 500,
  "global_step": 48,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.6060606060606061,
      "grad_norm": 2.365993915131312,
      "learning_rate": 0.0001934016108732548,
      "loss": 4.9025,
      "step": 10
    },
    {
      "epoch": 1.2121212121212122,
      "grad_norm": 0.5010752859822769,
      "learning_rate": 0.00014572423233046386,
      "loss": 2.152,
      "step": 20
    },
    {
      "epoch": 1.8181818181818183,
      "grad_norm": 0.32099488413380073,
      "learning_rate": 7.470666176083192e-05,
      "loss": 1.8198,
      "step": 30
    },
    {
      "epoch": 2.4242424242424243,
      "grad_norm": 0.2622629063842015,
      "learning_rate": 1.660021821101222e-05,
      "loss": 1.7642,
      "step": 40
    },
    {
      "epoch": 2.909090909090909,
      "step": 48,
      "total_flos": 88796276195328.0,
      "train_loss": 2.5086214343706765,
      "train_runtime": 154.8962,
      "train_samples_per_second": 4.997,
      "train_steps_per_second": 0.31
    }
  ],
  "logging_steps": 10,
  "max_steps": 48,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 88796276195328.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}
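
For reference, a minimal sketch (not part of the repository) of how this trainer_state.json could be parsed to inspect the logged training loss; the local path "trainer_state.json" is an assumption and would need to point at a downloaded copy of the file.

# Minimal sketch: read trainer_state.json and print the logged loss curve.
# Assumes the file above has been saved locally as "trainer_state.json".
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Intermediate entries carry the per-interval loss and learning rate;
# the final entry holds the run-level summary statistics.
for entry in state["log_history"]:
    if "loss" in entry:
        print(f"step {entry['step']:>3}  epoch {entry['epoch']:.2f}  "
              f"loss {entry['loss']:.4f}  lr {entry['learning_rate']:.2e}")
    else:
        print(f"final: train_loss {entry['train_loss']:.4f} "
              f"over {entry['train_runtime']:.1f}s")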