{
"best_metric": 0.8835566639900208,
"best_model_checkpoint": "miner_id_24/checkpoint-50",
"epoch": 3.0407239819004523,
"eval_steps": 25,
"global_step": 63,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.048265460030165915,
"grad_norm": 0.8179398775100708,
"learning_rate": 5e-05,
"loss": 1.7729,
"step": 1
},
{
"epoch": 0.048265460030165915,
"eval_loss": 1.9362893104553223,
"eval_runtime": 3.1934,
"eval_samples_per_second": 15.657,
"eval_steps_per_second": 4.071,
"step": 1
},
{
"epoch": 0.09653092006033183,
"grad_norm": 0.8880188465118408,
"learning_rate": 0.0001,
"loss": 2.0026,
"step": 2
},
{
"epoch": 0.14479638009049775,
"grad_norm": 0.865829348564148,
"learning_rate": 9.994033404481737e-05,
"loss": 2.0396,
"step": 3
},
{
"epoch": 0.19306184012066366,
"grad_norm": 0.9091627597808838,
"learning_rate": 9.97614944026565e-05,
"loss": 2.0337,
"step": 4
},
{
"epoch": 0.24132730015082957,
"grad_norm": 1.1735113859176636,
"learning_rate": 9.946395532409847e-05,
"loss": 2.102,
"step": 5
},
{
"epoch": 0.2895927601809955,
"grad_norm": 0.6793145537376404,
"learning_rate": 9.904850582929111e-05,
"loss": 1.5544,
"step": 6
},
{
"epoch": 0.3378582202111614,
"grad_norm": 0.8480486273765564,
"learning_rate": 9.851624761560943e-05,
"loss": 1.5623,
"step": 7
},
{
"epoch": 0.3861236802413273,
"grad_norm": 0.8613075613975525,
"learning_rate": 9.786859213615221e-05,
"loss": 1.434,
"step": 8
},
{
"epoch": 0.4343891402714932,
"grad_norm": 1.1011731624603271,
"learning_rate": 9.710725685682222e-05,
"loss": 1.3384,
"step": 9
},
{
"epoch": 0.48265460030165913,
"grad_norm": 1.5078444480895996,
"learning_rate": 9.623426070191522e-05,
"loss": 1.2491,
"step": 10
},
{
"epoch": 0.530920060331825,
"grad_norm": 0.9261085987091064,
"learning_rate": 9.525191870029581e-05,
"loss": 1.1621,
"step": 11
},
{
"epoch": 0.579185520361991,
"grad_norm": 1.253775954246521,
"learning_rate": 9.4162835846357e-05,
"loss": 1.2139,
"step": 12
},
{
"epoch": 0.6274509803921569,
"grad_norm": 1.0710676908493042,
"learning_rate": 9.296990019204335e-05,
"loss": 1.1141,
"step": 13
},
{
"epoch": 0.6757164404223228,
"grad_norm": 0.7302868366241455,
"learning_rate": 9.167627518825651e-05,
"loss": 1.0311,
"step": 14
},
{
"epoch": 0.7239819004524887,
"grad_norm": 0.6581991314888,
"learning_rate": 9.028539129595199e-05,
"loss": 1.0503,
"step": 15
},
{
"epoch": 0.7722473604826546,
"grad_norm": 0.43014147877693176,
"learning_rate": 8.88009368891734e-05,
"loss": 1.051,
"step": 16
},
{
"epoch": 0.8205128205128205,
"grad_norm": 0.5059635639190674,
"learning_rate": 8.72268484741477e-05,
"loss": 1.0703,
"step": 17
},
{
"epoch": 0.8687782805429864,
"grad_norm": 0.5659109950065613,
"learning_rate": 8.556730025037819e-05,
"loss": 1.069,
"step": 18
},
{
"epoch": 0.9170437405731523,
"grad_norm": 0.5554922223091125,
"learning_rate": 8.38266930414179e-05,
"loss": 0.9979,
"step": 19
},
{
"epoch": 0.9653092006033183,
"grad_norm": 0.553922176361084,
"learning_rate": 8.200964262467657e-05,
"loss": 0.969,
"step": 20
},
{
"epoch": 1.0135746606334841,
"grad_norm": 0.5600609183311462,
"learning_rate": 8.01209674912089e-05,
"loss": 1.3362,
"step": 21
},
{
"epoch": 1.06184012066365,
"grad_norm": 0.3536495566368103,
"learning_rate": 7.81656760679424e-05,
"loss": 0.9588,
"step": 22
},
{
"epoch": 1.110105580693816,
"grad_norm": 0.413228303194046,
"learning_rate": 7.614895343622941e-05,
"loss": 0.9448,
"step": 23
},
{
"epoch": 1.1583710407239818,
"grad_norm": 0.4351142346858978,
"learning_rate": 7.407614758194375e-05,
"loss": 0.9537,
"step": 24
},
{
"epoch": 1.2066365007541477,
"grad_norm": 0.4623880684375763,
"learning_rate": 7.195275521358334e-05,
"loss": 0.9933,
"step": 25
},
{
"epoch": 1.2066365007541477,
"eval_loss": 0.907273530960083,
"eval_runtime": 3.2352,
"eval_samples_per_second": 15.455,
"eval_steps_per_second": 4.018,
"step": 25
},
{
"epoch": 1.2549019607843137,
"grad_norm": 0.5164334177970886,
"learning_rate": 6.978440718598757e-05,
"loss": 0.9255,
"step": 26
},
{
"epoch": 1.3031674208144797,
"grad_norm": 0.3558405339717865,
"learning_rate": 6.757685356832243e-05,
"loss": 0.9118,
"step": 27
},
{
"epoch": 1.3514328808446456,
"grad_norm": 0.3763732314109802,
"learning_rate": 6.533594839593081e-05,
"loss": 0.9167,
"step": 28
},
{
"epoch": 1.3996983408748114,
"grad_norm": 0.41895684599876404,
"learning_rate": 6.306763414648311e-05,
"loss": 0.8792,
"step": 29
},
{
"epoch": 1.4479638009049773,
"grad_norm": 0.4729040861129761,
"learning_rate": 6.07779259815948e-05,
"loss": 0.8689,
"step": 30
},
{
"epoch": 1.4962292609351433,
"grad_norm": 0.4113113284111023,
"learning_rate": 5.84728957956991e-05,
"loss": 0.8323,
"step": 31
},
{
"epoch": 1.544494720965309,
"grad_norm": 0.38738012313842773,
"learning_rate": 5.61586561144745e-05,
"loss": 0.9668,
"step": 32
},
{
"epoch": 1.5927601809954752,
"grad_norm": 0.4025406241416931,
"learning_rate": 5.384134388552552e-05,
"loss": 0.9097,
"step": 33
},
{
"epoch": 1.641025641025641,
"grad_norm": 0.4479067027568817,
"learning_rate": 5.152710420430091e-05,
"loss": 0.9427,
"step": 34
},
{
"epoch": 1.689291101055807,
"grad_norm": 0.44481536746025085,
"learning_rate": 4.9222074018405206e-05,
"loss": 0.8624,
"step": 35
},
{
"epoch": 1.737556561085973,
"grad_norm": 0.46713030338287354,
"learning_rate": 4.693236585351691e-05,
"loss": 0.8882,
"step": 36
},
{
"epoch": 1.7858220211161386,
"grad_norm": 0.3744671642780304,
"learning_rate": 4.4664051604069214e-05,
"loss": 0.9416,
"step": 37
},
{
"epoch": 1.8340874811463048,
"grad_norm": 0.44846299290657043,
"learning_rate": 4.2423146431677585e-05,
"loss": 1.0138,
"step": 38
},
{
"epoch": 1.8823529411764706,
"grad_norm": 0.4410933256149292,
"learning_rate": 4.021559281401244e-05,
"loss": 0.8851,
"step": 39
},
{
"epoch": 1.9306184012066365,
"grad_norm": 0.47139236330986023,
"learning_rate": 3.804724478641667e-05,
"loss": 0.8259,
"step": 40
},
{
"epoch": 1.9788838612368025,
"grad_norm": 0.5577002167701721,
"learning_rate": 3.592385241805628e-05,
"loss": 1.0754,
"step": 41
},
{
"epoch": 2.0271493212669682,
"grad_norm": 0.4690263569355011,
"learning_rate": 3.385104656377062e-05,
"loss": 0.9935,
"step": 42
},
{
"epoch": 2.0754147812971344,
"grad_norm": 0.38023629784584045,
"learning_rate": 3.183432393205763e-05,
"loss": 0.8952,
"step": 43
},
{
"epoch": 2.1236802413273,
"grad_norm": 0.41368529200553894,
"learning_rate": 2.9879032508791093e-05,
"loss": 0.9206,
"step": 44
},
{
"epoch": 2.171945701357466,
"grad_norm": 0.4637509882450104,
"learning_rate": 2.799035737532344e-05,
"loss": 0.826,
"step": 45
},
{
"epoch": 2.220211161387632,
"grad_norm": 0.510759711265564,
"learning_rate": 2.6173306958582123e-05,
"loss": 0.8312,
"step": 46
},
{
"epoch": 2.268476621417798,
"grad_norm": 0.4286244511604309,
"learning_rate": 2.443269974962181e-05,
"loss": 0.7866,
"step": 47
},
{
"epoch": 2.3167420814479636,
"grad_norm": 0.37086257338523865,
"learning_rate": 2.277315152585231e-05,
"loss": 0.7716,
"step": 48
},
{
"epoch": 2.3650075414781297,
"grad_norm": 0.415539026260376,
"learning_rate": 2.1199063110826618e-05,
"loss": 0.7904,
"step": 49
},
{
"epoch": 2.4132730015082955,
"grad_norm": 0.47227543592453003,
"learning_rate": 1.9714608704048037e-05,
"loss": 0.7771,
"step": 50
},
{
"epoch": 2.4132730015082955,
"eval_loss": 0.8835566639900208,
"eval_runtime": 3.2406,
"eval_samples_per_second": 15.429,
"eval_steps_per_second": 4.012,
"step": 50
},
{
"epoch": 2.4615384615384617,
"grad_norm": 0.4867364466190338,
"learning_rate": 1.8323724811743496e-05,
"loss": 0.7947,
"step": 51
},
{
"epoch": 2.5098039215686274,
"grad_norm": 0.4455429017543793,
"learning_rate": 1.703009980795665e-05,
"loss": 0.8328,
"step": 52
},
{
"epoch": 2.558069381598793,
"grad_norm": 0.46695399284362793,
"learning_rate": 1.5837164153643015e-05,
"loss": 0.9561,
"step": 53
},
{
"epoch": 2.6063348416289593,
"grad_norm": 0.4342208206653595,
"learning_rate": 1.474808129970421e-05,
"loss": 0.7705,
"step": 54
},
{
"epoch": 2.654600301659125,
"grad_norm": 0.45350006222724915,
"learning_rate": 1.3765739298084793e-05,
"loss": 0.7835,
"step": 55
},
{
"epoch": 2.7028657616892913,
"grad_norm": 0.5114506483078003,
"learning_rate": 1.2892743143177793e-05,
"loss": 0.7688,
"step": 56
},
{
"epoch": 2.751131221719457,
"grad_norm": 0.458067923784256,
"learning_rate": 1.2131407863847787e-05,
"loss": 0.8188,
"step": 57
},
{
"epoch": 2.7993966817496228,
"grad_norm": 0.396027535200119,
"learning_rate": 1.1483752384390584e-05,
"loss": 0.803,
"step": 58
},
{
"epoch": 2.847662141779789,
"grad_norm": 0.5041918158531189,
"learning_rate": 1.09514941707089e-05,
"loss": 0.9092,
"step": 59
},
{
"epoch": 2.8959276018099547,
"grad_norm": 0.49184009432792664,
"learning_rate": 1.0536044675901534e-05,
"loss": 0.8057,
"step": 60
},
{
"epoch": 2.944193061840121,
"grad_norm": 0.5316694974899292,
"learning_rate": 1.0238505597343493e-05,
"loss": 0.7522,
"step": 61
},
{
"epoch": 2.9924585218702866,
"grad_norm": 0.5798410773277283,
"learning_rate": 1.0059665955182628e-05,
"loss": 0.9344,
"step": 62
},
{
"epoch": 3.0407239819004523,
"grad_norm": 0.40940624475479126,
"learning_rate": 1e-05,
"loss": 0.8405,
"step": 63
}
],
"logging_steps": 1,
"max_steps": 63,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 7.086312841417851e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}