|
{
  "best_metric": 0.03718467801809311,
  "best_model_checkpoint": "doc-topic-model_eval-00_train-02/checkpoint-13000",
  "epoch": 8.875739644970414,
  "eval_steps": 1000,
  "global_step": 18000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.2465483234714004,
      "grad_norm": 0.3767470121383667,
      "learning_rate": 1.9950690335305722e-05,
      "loss": 0.1663,
      "step": 500
    },
    {
      "epoch": 0.4930966469428008,
      "grad_norm": 0.4007192850112915,
      "learning_rate": 1.9901380670611442e-05,
      "loss": 0.0932,
      "step": 1000
    },
    {
      "epoch": 0.4930966469428008,
      "eval_accuracy": 0.9814660487265615,
      "eval_f1": 0.0,
      "eval_loss": 0.0863451287150383,
      "eval_precision": 0.0,
      "eval_recall": 0.0,
      "eval_runtime": 11.8139,
      "eval_samples_per_second": 686.478,
      "eval_steps_per_second": 2.709,
      "step": 1000
    },
    {
      "epoch": 0.7396449704142012,
      "grad_norm": 0.4121333062648773,
      "learning_rate": 1.9852071005917162e-05,
      "loss": 0.0831,
      "step": 1500
    },
    {
      "epoch": 0.9861932938856016,
      "grad_norm": 0.35336044430732727,
      "learning_rate": 1.980276134122288e-05,
      "loss": 0.0742,
      "step": 2000
    },
    {
      "epoch": 0.9861932938856016,
      "eval_accuracy": 0.9814660487265615,
      "eval_f1": 0.0,
      "eval_loss": 0.06615250557661057,
      "eval_precision": 0.0,
      "eval_recall": 0.0,
      "eval_runtime": 11.8646,
      "eval_samples_per_second": 683.543,
      "eval_steps_per_second": 2.697,
      "step": 2000
    },
    {
      "epoch": 1.232741617357002,
      "grad_norm": 0.4058854281902313,
      "learning_rate": 1.9753451676528602e-05,
      "loss": 0.0654,
      "step": 2500
    },
    {
      "epoch": 1.4792899408284024,
      "grad_norm": 0.4402715861797333,
      "learning_rate": 1.9704142011834322e-05,
      "loss": 0.0596,
      "step": 3000
    },
    {
      "epoch": 1.4792899408284024,
      "eval_accuracy": 0.9825927973128109,
      "eval_f1": 0.1370151770657673,
      "eval_loss": 0.05494347959756851,
      "eval_precision": 0.8441558441558441,
      "eval_recall": 0.07455838495067676,
      "eval_runtime": 14.1809,
      "eval_samples_per_second": 571.895,
      "eval_steps_per_second": 2.257,
      "step": 3000
    },
    {
      "epoch": 1.725838264299803,
      "grad_norm": 0.39516812562942505,
      "learning_rate": 1.965483234714004e-05,
      "loss": 0.0558,
      "step": 3500
    },
    {
      "epoch": 1.972386587771203,
      "grad_norm": 0.3793538510799408,
      "learning_rate": 1.9605522682445763e-05,
      "loss": 0.053,
      "step": 4000
    },
    {
      "epoch": 1.972386587771203,
      "eval_accuracy": 0.9848675538925975,
      "eval_f1": 0.38985084861992114,
      "eval_loss": 0.049108367413282394,
      "eval_precision": 0.771370420624152,
      "eval_recall": 0.26083964211975225,
      "eval_runtime": 14.0996,
      "eval_samples_per_second": 575.194,
      "eval_steps_per_second": 2.27,
      "step": 4000
    },
    {
      "epoch": 2.2189349112426036,
      "grad_norm": 0.33267995715141296,
      "learning_rate": 1.9556213017751483e-05,
      "loss": 0.0474,
      "step": 4500
    },
    {
      "epoch": 2.465483234714004,
      "grad_norm": 0.3178996443748474,
      "learning_rate": 1.95069033530572e-05,
      "loss": 0.0467,
      "step": 5000
    },
    {
      "epoch": 2.465483234714004,
      "eval_accuracy": 0.9857207647717449,
      "eval_f1": 0.45265388167544957,
      "eval_loss": 0.045239612460136414,
      "eval_precision": 0.7816135084427768,
      "eval_recall": 0.31857459662001986,
      "eval_runtime": 14.2568,
      "eval_samples_per_second": 568.853,
      "eval_steps_per_second": 2.245,
      "step": 5000
    },
    {
      "epoch": 2.712031558185404,
      "grad_norm": 0.41016682982444763,
      "learning_rate": 1.9457593688362923e-05,
      "loss": 0.0453,
      "step": 5500
    },
    {
      "epoch": 2.9585798816568047,
      "grad_norm": 0.30701619386672974,
      "learning_rate": 1.940828402366864e-05,
      "loss": 0.044,
      "step": 6000
    },
    {
      "epoch": 2.9585798816568047,
      "eval_accuracy": 0.986354295108919,
      "eval_f1": 0.5022231413504291,
      "eval_loss": 0.04268515482544899,
      "eval_precision": 0.7752593774940144,
      "eval_recall": 0.3714154622619867,
      "eval_runtime": 14.3323,
      "eval_samples_per_second": 565.856,
      "eval_steps_per_second": 2.233,
      "step": 6000
    },
    {
      "epoch": 3.2051282051282053,
      "grad_norm": 0.3377997875213623,
      "learning_rate": 1.935897435897436e-05,
      "loss": 0.0391,
      "step": 6500
    },
    {
      "epoch": 3.4516765285996054,
      "grad_norm": 0.3832477629184723,
      "learning_rate": 1.930966469428008e-05,
      "loss": 0.039,
      "step": 7000
    },
    {
      "epoch": 3.4516765285996054,
      "eval_accuracy": 0.9867454682030131,
      "eval_f1": 0.5505142747284437,
      "eval_loss": 0.04092588648200035,
      "eval_precision": 0.7409755466425152,
      "eval_recall": 0.43794448267951364,
      "eval_runtime": 14.2213,
      "eval_samples_per_second": 570.271,
      "eval_steps_per_second": 2.25,
      "step": 7000
    },
    {
      "epoch": 3.698224852071006,
      "grad_norm": 0.5340074896812439,
      "learning_rate": 1.92603550295858e-05,
      "loss": 0.0388,
      "step": 7500
    },
    {
      "epoch": 3.9447731755424065,
      "grad_norm": 0.40983426570892334,
      "learning_rate": 1.921104536489152e-05,
      "loss": 0.037,
      "step": 8000
    },
    {
      "epoch": 3.9447731755424065,
      "eval_accuracy": 0.986977904389359,
      "eval_f1": 0.5589477726574501,
      "eval_loss": 0.039010029286146164,
      "eval_precision": 0.7507414571244359,
      "eval_recall": 0.445209145828554,
      "eval_runtime": 14.193,
      "eval_samples_per_second": 571.408,
      "eval_steps_per_second": 2.255,
      "step": 8000
    },
    {
      "epoch": 4.191321499013807,
      "grad_norm": 0.5406789183616638,
      "learning_rate": 1.916173570019724e-05,
      "loss": 0.0354,
      "step": 8500
    },
    {
      "epoch": 4.437869822485207,
      "grad_norm": 0.3440360724925995,
      "learning_rate": 1.911242603550296e-05,
      "loss": 0.0337,
      "step": 9000
    },
    {
      "epoch": 4.437869822485207,
      "eval_accuracy": 0.9875023031024561,
      "eval_f1": 0.5771960107403146,
      "eval_loss": 0.03828894719481468,
      "eval_precision": 0.7737498393109654,
      "eval_recall": 0.4602737630955112,
      "eval_runtime": 14.3368,
      "eval_samples_per_second": 565.678,
      "eval_steps_per_second": 2.232,
      "step": 9000
    },
    {
      "epoch": 4.684418145956608,
      "grad_norm": 0.44961240887641907,
      "learning_rate": 1.906311637080868e-05,
      "loss": 0.0328,
      "step": 9500
    },
    {
      "epoch": 4.930966469428008,
      "grad_norm": 0.4392942488193512,
      "learning_rate": 1.90138067061144e-05,
      "loss": 0.0337,
      "step": 10000
    },
    {
      "epoch": 4.930966469428008,
      "eval_accuracy": 0.9875349008602974,
      "eval_f1": 0.5916709225126515,
      "eval_loss": 0.037467557936906815,
      "eval_precision": 0.7530134719924367,
      "eval_recall": 0.48726772195457674,
      "eval_runtime": 14.0557,
      "eval_samples_per_second": 576.991,
      "eval_steps_per_second": 2.277,
      "step": 10000
    },
    {
      "epoch": 5.177514792899408,
      "grad_norm": 0.302545428276062,
      "learning_rate": 1.896459566074951e-05,
      "loss": 0.0308,
      "step": 10500
    },
    {
      "epoch": 5.424063116370808,
      "grad_norm": 0.3933587968349457,
      "learning_rate": 1.891528599605523e-05,
      "loss": 0.0293,
      "step": 11000
    },
    {
      "epoch": 5.424063116370808,
      "eval_accuracy": 0.9876893858865882,
      "eval_f1": 0.6104932735426009,
      "eval_loss": 0.03747553750872612,
      "eval_precision": 0.738046188875637,
      "eval_recall": 0.5205322321633402,
      "eval_runtime": 14.3104,
      "eval_samples_per_second": 566.721,
      "eval_steps_per_second": 2.236,
      "step": 11000
    },
    {
      "epoch": 5.670611439842209,
      "grad_norm": 0.46541067957878113,
      "learning_rate": 1.886597633136095e-05,
      "loss": 0.0297,
      "step": 11500
    },
    {
      "epoch": 5.9171597633136095,
      "grad_norm": 0.41115203499794006,
      "learning_rate": 1.881666666666667e-05,
      "loss": 0.0297,
      "step": 12000
    },
    {
      "epoch": 5.9171597633136095,
      "eval_accuracy": 0.9876057655512565,
      "eval_f1": 0.6050313897294611,
      "eval_loss": 0.03745245933532715,
      "eval_precision": 0.7389673433362753,
      "eval_recall": 0.5121969870765466,
      "eval_runtime": 14.1164,
      "eval_samples_per_second": 574.507,
      "eval_steps_per_second": 2.267,
      "step": 12000
    },
    {
      "epoch": 6.16370808678501,
      "grad_norm": 0.3442145884037018,
      "learning_rate": 1.8767455621301777e-05,
      "loss": 0.0268,
      "step": 12500
    },
    {
      "epoch": 6.410256410256411,
      "grad_norm": 0.13360050320625305,
      "learning_rate": 1.8718145956607497e-05,
      "loss": 0.0263,
      "step": 13000
    },
    {
      "epoch": 6.410256410256411,
      "eval_accuracy": 0.9878920589027311,
      "eval_f1": 0.6160276866376017,
      "eval_loss": 0.03718467801809311,
      "eval_precision": 0.7471652856519843,
      "eval_recall": 0.524049858530244,
      "eval_runtime": 14.154,
      "eval_samples_per_second": 572.983,
      "eval_steps_per_second": 2.261,
      "step": 13000
    },
    {
      "epoch": 6.65680473372781,
      "grad_norm": 0.31057894229888916,
      "learning_rate": 1.8668836291913217e-05,
      "loss": 0.0264,
      "step": 13500
    },
    {
      "epoch": 6.903353057199211,
      "grad_norm": 0.2787815034389496,
      "learning_rate": 1.8619526627218937e-05,
      "loss": 0.0265,
      "step": 14000
    },
    {
      "epoch": 6.903353057199211,
      "eval_accuracy": 0.9876043482574373,
      "eval_f1": 0.6178449707244603,
      "eval_loss": 0.03766901046037674,
      "eval_precision": 0.7207666428789887,
      "eval_recall": 0.5406438785654202,
      "eval_runtime": 14.3038,
      "eval_samples_per_second": 566.983,
      "eval_steps_per_second": 2.237,
      "step": 14000
    },
    {
      "epoch": 7.149901380670611,
      "grad_norm": 0.20837119221687317,
      "learning_rate": 1.8570315581854045e-05,
      "loss": 0.0249,
      "step": 14500
    },
    {
      "epoch": 7.396449704142012,
      "grad_norm": 0.23010149598121643,
      "learning_rate": 1.8521005917159765e-05,
      "loss": 0.0235,
      "step": 15000
    },
    {
      "epoch": 7.396449704142012,
      "eval_accuracy": 0.9878296979746871,
      "eval_f1": 0.6237897042716319,
      "eval_loss": 0.03783150017261505,
      "eval_precision": 0.7303036520311859,
      "eval_recall": 0.5443909153475568,
      "eval_runtime": 14.1679,
      "eval_samples_per_second": 572.422,
      "eval_steps_per_second": 2.259,
      "step": 15000
    },
    {
      "epoch": 7.642998027613412,
      "grad_norm": 0.3158400058746338,
      "learning_rate": 1.8471696252465485e-05,
      "loss": 0.0226,
      "step": 15500
    },
    {
      "epoch": 7.889546351084813,
      "grad_norm": 0.5647494196891785,
      "learning_rate": 1.8422386587771205e-05,
      "loss": 0.0237,
      "step": 16000
    },
    {
      "epoch": 7.889546351084813,
      "eval_accuracy": 0.9877843445724733,
      "eval_f1": 0.6255376460876744,
      "eval_loss": 0.03789234161376953,
      "eval_precision": 0.7242454728370221,
      "eval_recall": 0.5505085264204328,
      "eval_runtime": 14.1193,
      "eval_samples_per_second": 574.389,
      "eval_steps_per_second": 2.266,
      "step": 16000
    },
    {
      "epoch": 8.136094674556213,
      "grad_norm": 0.2036384493112564,
      "learning_rate": 1.8373076923076926e-05,
      "loss": 0.0216,
      "step": 16500
    },
    {
      "epoch": 8.382642998027613,
      "grad_norm": 0.1960095316171646,
      "learning_rate": 1.8323865877712033e-05,
      "loss": 0.0205,
      "step": 17000
    },
    {
      "epoch": 8.382642998027613,
      "eval_accuracy": 0.987797100216846,
      "eval_f1": 0.6324282786885246,
      "eval_loss": 0.038331326097249985,
      "eval_precision": 0.7158596694694114,
      "eval_recall": 0.5664143152099105,
      "eval_runtime": 14.1717,
      "eval_samples_per_second": 572.267,
      "eval_steps_per_second": 2.258,
      "step": 17000
    },
    {
      "epoch": 8.629191321499015,
      "grad_norm": 0.44110924005508423,
      "learning_rate": 1.8274556213017754e-05,
      "loss": 0.0209,
      "step": 17500
    },
    {
      "epoch": 8.875739644970414,
      "grad_norm": 0.5276309847831726,
      "learning_rate": 1.822524654832347e-05,
      "loss": 0.0208,
      "step": 18000
    },
    {
      "epoch": 8.875739644970414,
      "eval_accuracy": 0.9874413594682313,
      "eval_f1": 0.631083725384071,
      "eval_loss": 0.03931192308664322,
      "eval_precision": 0.6926521659660025,
      "eval_recall": 0.579567179016594,
      "eval_runtime": 14.1285,
      "eval_samples_per_second": 574.016,
      "eval_steps_per_second": 2.265,
      "step": 18000
    }
  ],
  "logging_steps": 500,
  "max_steps": 202800,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 100,
  "save_steps": 1000,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 406579786604172.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}