gemma7b-fft-summarization-11-v1 / trainer_state.json
Latest commit by chansung: Model save (e09581f, verified)
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9931662870159453,
"eval_steps": 500,
"global_step": 109,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.009111617312072893,
"grad_norm": 2847.998104709306,
"learning_rate": 1.8181818181818183e-06,
"loss": 42.2184,
"step": 1
},
{
"epoch": 0.04555808656036447,
"grad_norm": 562.0305390689821,
"learning_rate": 9.090909090909091e-06,
"loss": 29.7876,
"step": 5
},
{
"epoch": 0.09111617312072894,
"grad_norm": 94.49823214647388,
"learning_rate": 1.8181818181818182e-05,
"loss": 18.8302,
"step": 10
},
{
"epoch": 0.1366742596810934,
"grad_norm": 54.33888379443643,
"learning_rate": 1.991790013823246e-05,
"loss": 7.0598,
"step": 15
},
{
"epoch": 0.18223234624145787,
"grad_norm": 24.89922969697027,
"learning_rate": 1.9586678530366607e-05,
"loss": 2.67,
"step": 20
},
{
"epoch": 0.22779043280182232,
"grad_norm": 95.75519440146691,
"learning_rate": 1.900968867902419e-05,
"loss": 2.059,
"step": 25
},
{
"epoch": 0.2733485193621868,
"grad_norm": 37.044831894790526,
"learning_rate": 1.820172254596956e-05,
"loss": 1.7679,
"step": 30
},
{
"epoch": 0.31890660592255127,
"grad_norm": 36.79419087973788,
"learning_rate": 1.7183493500977277e-05,
"loss": 1.5439,
"step": 35
},
{
"epoch": 0.36446469248291574,
"grad_norm": 29.940404675815028,
"learning_rate": 1.598110530491216e-05,
"loss": 1.4442,
"step": 40
},
{
"epoch": 0.41002277904328016,
"grad_norm": 4.190592272065602,
"learning_rate": 1.4625382902408356e-05,
"loss": 1.2511,
"step": 45
},
{
"epoch": 0.45558086560364464,
"grad_norm": 9.541987280500265,
"learning_rate": 1.315108218023621e-05,
"loss": 1.2099,
"step": 50
},
{
"epoch": 0.5011389521640092,
"grad_norm": 6.4393691976336305,
"learning_rate": 1.1595998950333794e-05,
"loss": 1.0664,
"step": 55
},
{
"epoch": 0.5466970387243736,
"grad_norm": 3.8451292577941585,
"learning_rate": 1e-05,
"loss": 0.968,
"step": 60
},
{
"epoch": 0.592255125284738,
"grad_norm": 2.4425581326450616,
"learning_rate": 8.404001049666211e-06,
"loss": 0.9188,
"step": 65
},
{
"epoch": 0.6378132118451025,
"grad_norm": 2.7821675174035865,
"learning_rate": 6.848917819763794e-06,
"loss": 0.881,
"step": 70
},
{
"epoch": 0.683371298405467,
"grad_norm": 4.6204696647361665,
"learning_rate": 5.37461709759165e-06,
"loss": 0.8782,
"step": 75
},
{
"epoch": 0.7289293849658315,
"grad_norm": 2.8829796033312296,
"learning_rate": 4.01889469508784e-06,
"loss": 0.8517,
"step": 80
},
{
"epoch": 0.7744874715261959,
"grad_norm": 1.6103793214527409,
"learning_rate": 2.8165064990227255e-06,
"loss": 0.8213,
"step": 85
},
{
"epoch": 0.8200455580865603,
"grad_norm": 1.3054680157700491,
"learning_rate": 1.7982774540304404e-06,
"loss": 0.8088,
"step": 90
},
{
"epoch": 0.8656036446469249,
"grad_norm": 1.0464961249658165,
"learning_rate": 9.903113209758098e-07,
"loss": 0.7988,
"step": 95
},
{
"epoch": 0.9111617312072893,
"grad_norm": 0.8824867589954551,
"learning_rate": 4.133214696333943e-07,
"loss": 0.7836,
"step": 100
},
{
"epoch": 0.9567198177676538,
"grad_norm": 0.8602109237194131,
"learning_rate": 8.209986176753947e-08,
"loss": 0.7827,
"step": 105
},
{
"epoch": 0.9931662870159453,
"eval_loss": 2.7200307846069336,
"eval_runtime": 0.4579,
"eval_samples_per_second": 45.861,
"eval_steps_per_second": 2.184,
"step": 109
},
{
"epoch": 0.9931662870159453,
"step": 109,
"total_flos": 14975678545920.0,
"train_loss": 3.682411685996099,
"train_runtime": 758.7359,
"train_samples_per_second": 37.012,
"train_steps_per_second": 0.144
}
],
"logging_steps": 5,
"max_steps": 109,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": false,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 14975678545920.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
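The file above is the standard `trainer_state.json` that the Hugging Face `Trainer` writes next to a checkpoint: `log_history` holds the per-step training logs, one evaluation record, and a final summary entry. A minimal sketch of reading it and printing the logged loss curve follows; it assumes the JSON is saved locally as `trainer_state.json` (the filename is an assumption, not part of the original file).

```python
import json

# Assumption: the trainer_state.json shown above is saved in the working directory.
with open("trainer_state.json") as f:
    state = json.load(f)

# log_history mixes per-step training logs, one eval record ("eval_loss"), and a
# final summary ("train_loss"); keep only entries with a per-step training "loss".
train_logs = [e for e in state["log_history"] if "loss" in e]

for e in train_logs:
    print(f"step {e['step']:>3}  epoch {e['epoch']:.3f}  "
          f"loss {e['loss']:.4f}  lr {e['learning_rate']:.2e}  "
          f"grad_norm {e['grad_norm']:.2f}")
```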