{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.9952941176470587,
"eval_steps": 500,
"global_step": 212,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.047058823529411764,
"grad_norm": 1.1233978271484375,
"learning_rate": 4.9931407070965254e-05,
"loss": 0.7646,
"num_input_tokens_seen": 112288,
"step": 5
},
{
"epoch": 0.09411764705882353,
"grad_norm": 0.8472293615341187,
"learning_rate": 4.97260046830541e-05,
"loss": 0.507,
"num_input_tokens_seen": 221152,
"step": 10
},
{
"epoch": 0.1411764705882353,
"grad_norm": 0.7263442277908325,
"learning_rate": 4.9384919968379945e-05,
"loss": 0.4389,
"num_input_tokens_seen": 332080,
"step": 15
},
{
"epoch": 0.18823529411764706,
"grad_norm": 0.6995546221733093,
"learning_rate": 4.891002460691306e-05,
"loss": 0.4212,
"num_input_tokens_seen": 446912,
"step": 20
},
{
"epoch": 0.23529411764705882,
"grad_norm": 0.6613317728042603,
"learning_rate": 4.83039245557597e-05,
"loss": 0.386,
"num_input_tokens_seen": 558000,
"step": 25
},
{
"epoch": 0.2823529411764706,
"grad_norm": 0.624698281288147,
"learning_rate": 4.756994574914359e-05,
"loss": 0.3521,
"num_input_tokens_seen": 669920,
"step": 30
},
{
"epoch": 0.32941176470588235,
"grad_norm": 0.6385682821273804,
"learning_rate": 4.6712115847560355e-05,
"loss": 0.3411,
"num_input_tokens_seen": 786032,
"step": 35
},
{
"epoch": 0.3764705882352941,
"grad_norm": 0.6398516297340393,
"learning_rate": 4.573514213625505e-05,
"loss": 0.3385,
"num_input_tokens_seen": 899424,
"step": 40
},
{
"epoch": 0.4235294117647059,
"grad_norm": 0.6650343537330627,
"learning_rate": 4.464438569430354e-05,
"loss": 0.3379,
"num_input_tokens_seen": 1017264,
"step": 45
},
{
"epoch": 0.47058823529411764,
"grad_norm": 0.6373805403709412,
"learning_rate": 4.344583197604318e-05,
"loss": 0.3198,
"num_input_tokens_seen": 1131984,
"step": 50
},
{
"epoch": 0.5176470588235295,
"grad_norm": 0.6740750074386597,
"learning_rate": 4.214605796628527e-05,
"loss": 0.3278,
"num_input_tokens_seen": 1248864,
"step": 55
},
{
"epoch": 0.5647058823529412,
"grad_norm": 0.7610446214675903,
"learning_rate": 4.075219608954278e-05,
"loss": 0.313,
"num_input_tokens_seen": 1364864,
"step": 60
},
{
"epoch": 0.611764705882353,
"grad_norm": 0.7002452611923218,
"learning_rate": 3.927189507131938e-05,
"loss": 0.3111,
"num_input_tokens_seen": 1477168,
"step": 65
},
{
"epoch": 0.6588235294117647,
"grad_norm": 0.6586229801177979,
"learning_rate": 3.7713277966230514e-05,
"loss": 0.311,
"num_input_tokens_seen": 1587472,
"step": 70
},
{
"epoch": 0.7058823529411765,
"grad_norm": 0.6844495534896851,
"learning_rate": 3.608489758327472e-05,
"loss": 0.3145,
"num_input_tokens_seen": 1705520,
"step": 75
},
{
"epoch": 0.7529411764705882,
"grad_norm": 0.6307282447814941,
"learning_rate": 3.4395689552855955e-05,
"loss": 0.3006,
"num_input_tokens_seen": 1818720,
"step": 80
},
{
"epoch": 0.8,
"grad_norm": 0.6304646730422974,
"learning_rate": 3.265492329309867e-05,
"loss": 0.3006,
"num_input_tokens_seen": 1934032,
"step": 85
},
{
"epoch": 0.8470588235294118,
"grad_norm": 0.6582378149032593,
"learning_rate": 3.0872151144524595e-05,
"loss": 0.3016,
"num_input_tokens_seen": 2040336,
"step": 90
},
{
"epoch": 0.8941176470588236,
"grad_norm": 0.7168837189674377,
"learning_rate": 2.9057155952211502e-05,
"loss": 0.2939,
"num_input_tokens_seen": 2149840,
"step": 95
},
{
"epoch": 0.9411764705882353,
"grad_norm": 0.6492182016372681,
"learning_rate": 2.7219897383073373e-05,
"loss": 0.2893,
"num_input_tokens_seen": 2262096,
"step": 100
},
{
"epoch": 0.9882352941176471,
"grad_norm": 0.6181011199951172,
"learning_rate": 2.537045727284232e-05,
"loss": 0.271,
"num_input_tokens_seen": 2371520,
"step": 105
},
{
"epoch": 1.035294117647059,
"grad_norm": 0.6379333138465881,
"learning_rate": 2.3518984302657146e-05,
"loss": 0.2694,
"num_input_tokens_seen": 2488736,
"step": 110
},
{
"epoch": 1.0823529411764705,
"grad_norm": 0.6440654397010803,
"learning_rate": 2.1675638308842145e-05,
"loss": 0.276,
"num_input_tokens_seen": 2604976,
"step": 115
},
{
"epoch": 1.1294117647058823,
"grad_norm": 0.7227081060409546,
"learning_rate": 1.9850534531472546e-05,
"loss": 0.2677,
"num_input_tokens_seen": 2719664,
"step": 120
},
{
"epoch": 1.1764705882352942,
"grad_norm": 0.6659607887268066,
"learning_rate": 1.8053688107658908e-05,
"loss": 0.277,
"num_input_tokens_seen": 2833008,
"step": 125
},
{
"epoch": 1.223529411764706,
"grad_norm": 0.6689413785934448,
"learning_rate": 1.6294959114140034e-05,
"loss": 0.2646,
"num_input_tokens_seen": 2946768,
"step": 130
},
{
"epoch": 1.2705882352941176,
"grad_norm": 0.6785162091255188,
"learning_rate": 1.4583998460759424e-05,
"loss": 0.2578,
"num_input_tokens_seen": 3057648,
"step": 135
},
{
"epoch": 1.3176470588235294,
"grad_norm": 0.7651886940002441,
"learning_rate": 1.2930194931731382e-05,
"loss": 0.2668,
"num_input_tokens_seen": 3168928,
"step": 140
},
{
"epoch": 1.3647058823529412,
"grad_norm": 0.7469376921653748,
"learning_rate": 1.1342623665304209e-05,
"loss": 0.2773,
"num_input_tokens_seen": 3285296,
"step": 145
},
{
"epoch": 1.4117647058823528,
"grad_norm": 0.6922341585159302,
"learning_rate": 9.829996354535172e-06,
"loss": 0.2675,
"num_input_tokens_seen": 3396800,
"step": 150
},
{
"epoch": 1.4588235294117646,
"grad_norm": 0.7332698702812195,
"learning_rate": 8.400613442446948e-06,
"loss": 0.2719,
"num_input_tokens_seen": 3510752,
"step": 155
},
{
"epoch": 1.5058823529411764,
"grad_norm": 0.684525191783905,
"learning_rate": 7.062318573891716e-06,
"loss": 0.2719,
"num_input_tokens_seen": 3623024,
"step": 160
},
{
"epoch": 1.5529411764705883,
"grad_norm": 0.69036465883255,
"learning_rate": 5.822455554065217e-06,
"loss": 0.2734,
"num_input_tokens_seen": 3738336,
"step": 165
},
{
"epoch": 1.6,
"grad_norm": 0.750205934047699,
"learning_rate": 4.687828049857967e-06,
"loss": 0.2547,
"num_input_tokens_seen": 3846560,
"step": 170
},
{
"epoch": 1.6470588235294117,
"grad_norm": 0.6747934222221375,
"learning_rate": 3.6646622551801345e-06,
"loss": 0.2473,
"num_input_tokens_seen": 3960208,
"step": 175
},
{
"epoch": 1.6941176470588235,
"grad_norm": 0.683767557144165,
"learning_rate": 2.75857272513132e-06,
"loss": 0.2679,
"num_input_tokens_seen": 4073792,
"step": 180
},
{
"epoch": 1.7411764705882353,
"grad_norm": 0.608447253704071,
"learning_rate": 1.9745315664982276e-06,
"loss": 0.2517,
"num_input_tokens_seen": 4185424,
"step": 185
},
{
"epoch": 1.788235294117647,
"grad_norm": 0.6212649941444397,
"learning_rate": 1.3168411536452152e-06,
"loss": 0.2485,
"num_input_tokens_seen": 4298448,
"step": 190
},
{
"epoch": 1.835294117647059,
"grad_norm": 0.6908031702041626,
"learning_rate": 7.891105195175358e-07,
"loss": 0.2614,
"num_input_tokens_seen": 4416480,
"step": 195
},
{
"epoch": 1.8823529411764706,
"grad_norm": 0.673697292804718,
"learning_rate": 3.9423555131007925e-07,
"loss": 0.2634,
"num_input_tokens_seen": 4529488,
"step": 200
},
{
"epoch": 1.9294117647058824,
"grad_norm": 0.6629529595375061,
"learning_rate": 1.343830994765982e-07,
"loss": 0.2527,
"num_input_tokens_seen": 4641008,
"step": 205
},
{
"epoch": 1.9764705882352942,
"grad_norm": 0.711607038974762,
"learning_rate": 1.0979087280141298e-08,
"loss": 0.2628,
"num_input_tokens_seen": 4751536,
"step": 210
},
{
"epoch": 1.9952941176470587,
"num_input_tokens_seen": 4796864,
"step": 212,
"total_flos": 2.0534733878748774e+17,
"train_loss": 0.31159005553092595,
"train_runtime": 2642.3532,
"train_samples_per_second": 2.573,
"train_steps_per_second": 0.08
}
],
"logging_steps": 5,
"max_steps": 212,
"num_input_tokens_seen": 4796864,
"num_train_epochs": 2,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.0534733878748774e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}