{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.936936936936937,
"eval_steps": 500,
"global_step": 65,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.07207207207207207,
"grad_norm": 0.0,
"learning_rate": 0.0,
"loss": 110.9883,
"step": 1
},
{
"epoch": 0.14414414414414414,
"grad_norm": 0.0,
"learning_rate": 0.0,
"loss": 110.4609,
"step": 2
},
{
"epoch": 0.21621621621621623,
"grad_norm": 0.0,
"learning_rate": 0.0,
"loss": 111.4512,
"step": 3
},
{
"epoch": 0.2882882882882883,
"grad_norm": 0.0,
"learning_rate": 0.0,
"loss": 111.1406,
"step": 4
},
{
"epoch": 0.36036036036036034,
"grad_norm": 0.0,
"learning_rate": 0.0,
"loss": 110.6152,
"step": 5
},
{
"epoch": 0.43243243243243246,
"grad_norm": 0.0,
"learning_rate": 0.0,
"loss": 111.1738,
"step": 6
},
{
"epoch": 0.5045045045045045,
"grad_norm": 0.0,
"learning_rate": 0.0,
"loss": 110.5547,
"step": 7
},
{
"epoch": 0.5765765765765766,
"grad_norm": 0.0,
"learning_rate": 0.0,
"loss": 110.459,
"step": 8
},
{
"epoch": 0.6486486486486487,
"grad_norm": 0.0,
"learning_rate": 0.0,
"loss": 111.123,
"step": 9
},
{
"epoch": 0.7207207207207207,
"grad_norm": 0.0,
"learning_rate": 0.0,
"loss": 111.6152,
"step": 10
},
{
"epoch": 0.7927927927927928,
"grad_norm": 0.0,
"learning_rate": 0.0,
"loss": 110.6719,
"step": 11
},
{
"epoch": 0.8648648648648649,
"grad_norm": 0.0,
"learning_rate": 0.0,
"loss": 110.877,
"step": 12
},
{
"epoch": 0.9369369369369369,
"grad_norm": 0.0,
"learning_rate": 0.0,
"loss": 111.459,
"step": 13
},
{
"epoch": 1.072072072072072,
"grad_norm": 0.0,
"learning_rate": 0.0,
"loss": 124.793,
"step": 14
},
{
"epoch": 1.1441441441441442,
"grad_norm": 0.0,
"learning_rate": 0.0,
"loss": 111.0137,
"step": 15
},
{
"epoch": 1.2162162162162162,
"grad_norm": 0.0,
"learning_rate": 0.0,
"loss": 110.7871,
"step": 16
},
{
"epoch": 1.2882882882882882,
"grad_norm": 0.0,
"learning_rate": 0.0,
"loss": 110.6484,
"step": 17
},
{
"epoch": 1.3603603603603602,
"grad_norm": 0.0,
"learning_rate": 0.0,
"loss": 110.6309,
"step": 18
},
{
"epoch": 1.4324324324324325,
"grad_norm": 121.69364311021961,
"learning_rate": 0.0001,
"loss": 111.0137,
"step": 19
},
{
"epoch": 1.5045045045045045,
"grad_norm": 121.82811999006017,
"learning_rate": 9.993977281025862e-05,
"loss": 102.374,
"step": 20
},
{
"epoch": 1.5765765765765765,
"grad_norm": 43.762035174980724,
"learning_rate": 9.975923633360985e-05,
"loss": 37.7349,
"step": 21
},
{
"epoch": 1.6486486486486487,
"grad_norm": 43.762035174980724,
"learning_rate": 9.975923633360985e-05,
"loss": 23.8311,
"step": 22
},
{
"epoch": 1.7207207207207207,
"grad_norm": 43.762035174980724,
"learning_rate": 9.975923633360985e-05,
"loss": 23.6587,
"step": 23
},
{
"epoch": 1.7927927927927927,
"grad_norm": 67.60606900572056,
"learning_rate": 9.945882549823906e-05,
"loss": 23.4604,
"step": 24
},
{
"epoch": 1.864864864864865,
"grad_norm": 47.84182107476679,
"learning_rate": 9.903926402016153e-05,
"loss": 22.4585,
"step": 25
},
{
"epoch": 1.936936936936937,
"grad_norm": 97.3110950444501,
"learning_rate": 9.850156265972721e-05,
"loss": 15.5953,
"step": 26
},
{
"epoch": 2.0720720720720722,
"grad_norm": 13.359584588085475,
"learning_rate": 9.784701678661045e-05,
"loss": 5.8495,
"step": 27
},
{
"epoch": 2.144144144144144,
"grad_norm": 15.499340904743185,
"learning_rate": 9.707720325915104e-05,
"loss": 3.2645,
"step": 28
},
{
"epoch": 2.2162162162162162,
"grad_norm": 5.2516204512990035,
"learning_rate": 9.619397662556435e-05,
"loss": 0.7318,
"step": 29
},
{
"epoch": 2.2882882882882885,
"grad_norm": 6.351586523632304,
"learning_rate": 9.519946465617218e-05,
"loss": 0.8066,
"step": 30
},
{
"epoch": 2.3603603603603602,
"grad_norm": 4.296773347519179,
"learning_rate": 9.409606321741775e-05,
"loss": 0.389,
"step": 31
},
{
"epoch": 2.4324324324324325,
"grad_norm": 3.7567276687430526,
"learning_rate": 9.288643050001361e-05,
"loss": 0.238,
"step": 32
},
{
"epoch": 2.5045045045045047,
"grad_norm": 0.8841967237890292,
"learning_rate": 9.157348061512727e-05,
"loss": 0.1217,
"step": 33
},
{
"epoch": 2.5765765765765765,
"grad_norm": 2.065381666847204,
"learning_rate": 9.016037657403224e-05,
"loss": 0.7948,
"step": 34
},
{
"epoch": 2.6486486486486487,
"grad_norm": 36.57744419486038,
"learning_rate": 8.865052266813685e-05,
"loss": 1.8392,
"step": 35
},
{
"epoch": 2.7207207207207205,
"grad_norm": 6.098258360827327,
"learning_rate": 8.704755626774796e-05,
"loss": 0.4845,
"step": 36
},
{
"epoch": 2.7927927927927927,
"grad_norm": 4.902366899534486,
"learning_rate": 8.535533905932738e-05,
"loss": 0.2728,
"step": 37
},
{
"epoch": 2.864864864864865,
"grad_norm": 3.3005947444072987,
"learning_rate": 8.357794774235092e-05,
"loss": 0.2294,
"step": 38
},
{
"epoch": 2.936936936936937,
"grad_norm": 0.5713662641479601,
"learning_rate": 8.171966420818228e-05,
"loss": 0.0271,
"step": 39
},
{
"epoch": 3.0720720720720722,
"grad_norm": 0.3093144415337581,
"learning_rate": 7.978496522462167e-05,
"loss": 0.0544,
"step": 40
},
{
"epoch": 3.144144144144144,
"grad_norm": 6.086269384303536,
"learning_rate": 7.777851165098012e-05,
"loss": 0.0918,
"step": 41
},
{
"epoch": 3.2162162162162162,
"grad_norm": 0.8619135383139304,
"learning_rate": 7.570513720966108e-05,
"loss": 0.0149,
"step": 42
},
{
"epoch": 3.2882882882882885,
"grad_norm": 0.170124990369711,
"learning_rate": 7.35698368412999e-05,
"loss": 0.0106,
"step": 43
},
{
"epoch": 3.3603603603603602,
"grad_norm": 0.2984404823393874,
"learning_rate": 7.137775467151411e-05,
"loss": 0.1274,
"step": 44
},
{
"epoch": 3.4324324324324325,
"grad_norm": 2.7538582006280317,
"learning_rate": 6.91341716182545e-05,
"loss": 1.8402,
"step": 45
},
{
"epoch": 3.5045045045045047,
"grad_norm": 2.7538582006280317,
"learning_rate": 6.91341716182545e-05,
"loss": 4.0066,
"step": 46
},
{
"epoch": 3.5765765765765765,
"grad_norm": 243.24225964046624,
"learning_rate": 6.6844492669611e-05,
"loss": 2.8196,
"step": 47
},
{
"epoch": 3.6486486486486487,
"grad_norm": 4.988008998052072,
"learning_rate": 6.451423386272312e-05,
"loss": 0.377,
"step": 48
},
{
"epoch": 3.7207207207207205,
"grad_norm": 0.2693118570806549,
"learning_rate": 6.21490089951632e-05,
"loss": 0.0247,
"step": 49
},
{
"epoch": 3.7927927927927927,
"grad_norm": 0.05521830464606099,
"learning_rate": 5.9754516100806423e-05,
"loss": 0.0091,
"step": 50
},
{
"epoch": 3.864864864864865,
"grad_norm": 0.6910149711748117,
"learning_rate": 5.733652372276809e-05,
"loss": 0.0199,
"step": 51
},
{
"epoch": 3.936936936936937,
"grad_norm": 0.10849567725705088,
"learning_rate": 5.490085701647805e-05,
"loss": 0.0033,
"step": 52
},
{
"epoch": 4.072072072072072,
"grad_norm": 0.10809313247991727,
"learning_rate": 5.245338371637091e-05,
"loss": 0.0146,
"step": 53
},
{
"epoch": 4.1441441441441444,
"grad_norm": 0.05128671267400455,
"learning_rate": 5e-05,
"loss": 0.0023,
"step": 54
},
{
"epoch": 4.216216216216216,
"grad_norm": 0.04446616393871321,
"learning_rate": 4.7546616283629105e-05,
"loss": 0.0706,
"step": 55
},
{
"epoch": 4.288288288288288,
"grad_norm": 92.7258697131631,
"learning_rate": 4.509914298352197e-05,
"loss": 0.0734,
"step": 56
},
{
"epoch": 4.36036036036036,
"grad_norm": 0.10509705390750478,
"learning_rate": 4.2663476277231915e-05,
"loss": 0.0052,
"step": 57
},
{
"epoch": 4.4324324324324325,
"grad_norm": 0.05685107825836915,
"learning_rate": 4.0245483899193595e-05,
"loss": 0.0009,
"step": 58
},
{
"epoch": 4.504504504504505,
"grad_norm": 0.0071402069274671244,
"learning_rate": 3.785099100483681e-05,
"loss": 0.0004,
"step": 59
},
{
"epoch": 4.576576576576577,
"grad_norm": 0.007037629263124771,
"learning_rate": 3.5485766137276894e-05,
"loss": 0.0033,
"step": 60
},
{
"epoch": 4.648648648648649,
"grad_norm": 0.07795566744834063,
"learning_rate": 3.3155507330389e-05,
"loss": 0.0067,
"step": 61
},
{
"epoch": 4.7207207207207205,
"grad_norm": 0.21346197540898718,
"learning_rate": 3.086582838174551e-05,
"loss": 0.0008,
"step": 62
},
{
"epoch": 4.792792792792793,
"grad_norm": 0.07957365304285995,
"learning_rate": 2.8622245328485907e-05,
"loss": 0.005,
"step": 63
},
{
"epoch": 4.864864864864865,
"grad_norm": 0.0022461733078342535,
"learning_rate": 2.6430163158700115e-05,
"loss": 0.0076,
"step": 64
},
{
"epoch": 4.936936936936937,
"grad_norm": 0.06343292790647254,
"learning_rate": 2.4294862790338917e-05,
"loss": 0.0001,
"step": 65
}
],
"logging_steps": 1.0,
"max_steps": 65,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.90161235112362e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}