{
  "best_metric": 0.8354430379746836,
  "best_model_checkpoint": "deit-base-distilled-patch16-224-55-fold1/checkpoint-101",
  "epoch": 85.71428571428571,
  "eval_steps": 500,
  "global_step": 300,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.8571428571428571,
      "eval_accuracy": 0.43037974683544306,
      "eval_loss": 0.7879665493965149,
      "eval_runtime": 1.0027,
      "eval_samples_per_second": 78.785,
      "eval_steps_per_second": 2.992,
      "step": 3
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.5443037974683544,
      "eval_loss": 0.833726704120636,
      "eval_runtime": 0.9971,
      "eval_samples_per_second": 79.229,
      "eval_steps_per_second": 3.009,
      "step": 7
    },
    {
      "epoch": 2.857142857142857,
      "grad_norm": 4.414129734039307,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 0.776,
      "step": 10
    },
    {
      "epoch": 2.857142857142857,
      "eval_accuracy": 0.5443037974683544,
      "eval_loss": 0.704697847366333,
      "eval_runtime": 1.0453,
      "eval_samples_per_second": 75.575,
      "eval_steps_per_second": 2.87,
      "step": 10
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.6582278481012658,
      "eval_loss": 0.6729037761688232,
      "eval_runtime": 1.0381,
      "eval_samples_per_second": 76.102,
      "eval_steps_per_second": 2.89,
      "step": 14
    },
    {
      "epoch": 4.857142857142857,
      "eval_accuracy": 0.569620253164557,
      "eval_loss": 0.6784361600875854,
      "eval_runtime": 1.0714,
      "eval_samples_per_second": 73.736,
      "eval_steps_per_second": 2.8,
      "step": 17
    },
    {
      "epoch": 5.714285714285714,
      "grad_norm": 1.1414231061935425,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.6852,
      "step": 20
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.6329113924050633,
      "eval_loss": 0.6250560879707336,
      "eval_runtime": 1.0536,
      "eval_samples_per_second": 74.978,
      "eval_steps_per_second": 2.847,
      "step": 21
    },
    {
      "epoch": 6.857142857142857,
      "eval_accuracy": 0.6329113924050633,
      "eval_loss": 0.624561607837677,
      "eval_runtime": 1.0579,
      "eval_samples_per_second": 74.676,
      "eval_steps_per_second": 2.836,
      "step": 24
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.6835443037974683,
      "eval_loss": 0.5865054726600647,
      "eval_runtime": 1.0641,
      "eval_samples_per_second": 74.244,
      "eval_steps_per_second": 2.819,
      "step": 28
    },
    {
      "epoch": 8.571428571428571,
      "grad_norm": 4.807124614715576,
      "learning_rate": 5e-05,
      "loss": 0.6223,
      "step": 30
    },
    {
      "epoch": 8.857142857142858,
      "eval_accuracy": 0.6582278481012658,
      "eval_loss": 0.5573264956474304,
      "eval_runtime": 1.0677,
      "eval_samples_per_second": 73.988,
      "eval_steps_per_second": 2.81,
      "step": 31
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.6582278481012658,
      "eval_loss": 0.5841125249862671,
      "eval_runtime": 1.0817,
      "eval_samples_per_second": 73.035,
      "eval_steps_per_second": 2.773,
      "step": 35
    },
    {
      "epoch": 10.857142857142858,
      "eval_accuracy": 0.7088607594936709,
      "eval_loss": 0.5790536403656006,
      "eval_runtime": 1.067,
      "eval_samples_per_second": 74.04,
      "eval_steps_per_second": 2.812,
      "step": 38
    },
    {
      "epoch": 11.428571428571429,
      "grad_norm": 2.0765061378479004,
      "learning_rate": 4.814814814814815e-05,
      "loss": 0.5573,
      "step": 40
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.7215189873417721,
      "eval_loss": 0.5282906889915466,
      "eval_runtime": 1.1116,
      "eval_samples_per_second": 71.069,
      "eval_steps_per_second": 2.699,
      "step": 42
    },
    {
      "epoch": 12.857142857142858,
      "eval_accuracy": 0.759493670886076,
      "eval_loss": 0.5125677585601807,
      "eval_runtime": 1.1021,
      "eval_samples_per_second": 71.681,
      "eval_steps_per_second": 2.722,
      "step": 45
    },
    {
      "epoch": 14.0,
      "eval_accuracy": 0.7088607594936709,
      "eval_loss": 0.5642557740211487,
      "eval_runtime": 1.1087,
      "eval_samples_per_second": 71.253,
      "eval_steps_per_second": 2.706,
      "step": 49
    },
    {
      "epoch": 14.285714285714286,
      "grad_norm": 5.382793426513672,
      "learning_rate": 4.62962962962963e-05,
      "loss": 0.4772,
      "step": 50
    },
    {
      "epoch": 14.857142857142858,
      "eval_accuracy": 0.6582278481012658,
      "eval_loss": 0.6735838055610657,
      "eval_runtime": 1.1489,
      "eval_samples_per_second": 68.759,
      "eval_steps_per_second": 2.611,
      "step": 52
    },
    {
      "epoch": 16.0,
      "eval_accuracy": 0.759493670886076,
      "eval_loss": 0.570650041103363,
      "eval_runtime": 1.1118,
      "eval_samples_per_second": 71.054,
      "eval_steps_per_second": 2.698,
      "step": 56
    },
    {
      "epoch": 16.857142857142858,
      "eval_accuracy": 0.7215189873417721,
      "eval_loss": 0.5198583602905273,
      "eval_runtime": 1.1171,
      "eval_samples_per_second": 70.72,
      "eval_steps_per_second": 2.686,
      "step": 59
    },
    {
      "epoch": 17.142857142857142,
      "grad_norm": 5.891899108886719,
      "learning_rate": 4.4444444444444447e-05,
      "loss": 0.4656,
      "step": 60
    },
    {
      "epoch": 18.0,
      "eval_accuracy": 0.759493670886076,
      "eval_loss": 0.5285176634788513,
      "eval_runtime": 1.1106,
      "eval_samples_per_second": 71.133,
      "eval_steps_per_second": 2.701,
      "step": 63
    },
    {
      "epoch": 18.857142857142858,
      "eval_accuracy": 0.7848101265822784,
      "eval_loss": 0.4534749984741211,
      "eval_runtime": 1.1235,
      "eval_samples_per_second": 70.319,
      "eval_steps_per_second": 2.67,
      "step": 66
    },
    {
      "epoch": 20.0,
      "grad_norm": 3.8716275691986084,
      "learning_rate": 4.259259259259259e-05,
      "loss": 0.4147,
      "step": 70
    },
    {
      "epoch": 20.0,
      "eval_accuracy": 0.7974683544303798,
      "eval_loss": 0.45567476749420166,
      "eval_runtime": 1.1234,
      "eval_samples_per_second": 70.324,
      "eval_steps_per_second": 2.671,
      "step": 70
    },
    {
      "epoch": 20.857142857142858,
      "eval_accuracy": 0.7974683544303798,
      "eval_loss": 0.44825637340545654,
      "eval_runtime": 1.1316,
      "eval_samples_per_second": 69.813,
      "eval_steps_per_second": 2.651,
      "step": 73
    },
    {
      "epoch": 22.0,
      "eval_accuracy": 0.7341772151898734,
      "eval_loss": 0.5025177001953125,
      "eval_runtime": 1.1367,
      "eval_samples_per_second": 69.497,
      "eval_steps_per_second": 2.639,
      "step": 77
    },
    {
      "epoch": 22.857142857142858,
      "grad_norm": 3.101086378097534,
      "learning_rate": 4.074074074074074e-05,
      "loss": 0.342,
      "step": 80
    },
    {
      "epoch": 22.857142857142858,
      "eval_accuracy": 0.7215189873417721,
      "eval_loss": 0.4716118574142456,
      "eval_runtime": 1.176,
      "eval_samples_per_second": 67.18,
      "eval_steps_per_second": 2.551,
      "step": 80
    },
    {
      "epoch": 24.0,
      "eval_accuracy": 0.7341772151898734,
      "eval_loss": 0.5310951471328735,
      "eval_runtime": 1.1575,
      "eval_samples_per_second": 68.251,
      "eval_steps_per_second": 2.592,
      "step": 84
    },
    {
      "epoch": 24.857142857142858,
      "eval_accuracy": 0.7848101265822784,
      "eval_loss": 0.45601552724838257,
      "eval_runtime": 1.1329,
      "eval_samples_per_second": 69.732,
      "eval_steps_per_second": 2.648,
      "step": 87
    },
    {
      "epoch": 25.714285714285715,
      "grad_norm": 5.771670818328857,
      "learning_rate": 3.888888888888889e-05,
      "loss": 0.2993,
      "step": 90
    },
    {
      "epoch": 26.0,
      "eval_accuracy": 0.7848101265822784,
      "eval_loss": 0.511889636516571,
      "eval_runtime": 1.1386,
      "eval_samples_per_second": 69.386,
      "eval_steps_per_second": 2.635,
      "step": 91
    },
    {
      "epoch": 26.857142857142858,
      "eval_accuracy": 0.7721518987341772,
      "eval_loss": 0.532077968120575,
      "eval_runtime": 1.1122,
      "eval_samples_per_second": 71.03,
      "eval_steps_per_second": 2.697,
      "step": 94
    },
    {
      "epoch": 28.0,
      "eval_accuracy": 0.7974683544303798,
      "eval_loss": 0.49367329478263855,
      "eval_runtime": 1.1193,
      "eval_samples_per_second": 70.578,
      "eval_steps_per_second": 2.68,
      "step": 98
    },
    {
      "epoch": 28.571428571428573,
      "grad_norm": 3.30381441116333,
      "learning_rate": 3.7037037037037037e-05,
      "loss": 0.2506,
      "step": 100
    },
    {
      "epoch": 28.857142857142858,
      "eval_accuracy": 0.8354430379746836,
      "eval_loss": 0.4563353955745697,
      "eval_runtime": 1.1319,
      "eval_samples_per_second": 69.793,
      "eval_steps_per_second": 2.65,
      "step": 101
    },
    {
      "epoch": 30.0,
      "eval_accuracy": 0.7974683544303798,
      "eval_loss": 0.5234401822090149,
      "eval_runtime": 1.1604,
      "eval_samples_per_second": 68.081,
      "eval_steps_per_second": 2.585,
      "step": 105
    },
    {
      "epoch": 30.857142857142858,
      "eval_accuracy": 0.7848101265822784,
      "eval_loss": 0.5359364151954651,
      "eval_runtime": 1.1233,
      "eval_samples_per_second": 70.328,
      "eval_steps_per_second": 2.671,
      "step": 108
    },
    {
      "epoch": 31.428571428571427,
      "grad_norm": 2.6743521690368652,
      "learning_rate": 3.518518518518519e-05,
      "loss": 0.2201,
      "step": 110
    },
    {
      "epoch": 32.0,
      "eval_accuracy": 0.7974683544303798,
      "eval_loss": 0.5144768357276917,
      "eval_runtime": 1.1392,
      "eval_samples_per_second": 69.347,
      "eval_steps_per_second": 2.633,
      "step": 112
    },
    {
      "epoch": 32.857142857142854,
      "eval_accuracy": 0.810126582278481,
      "eval_loss": 0.534345269203186,
      "eval_runtime": 1.1264,
      "eval_samples_per_second": 70.134,
      "eval_steps_per_second": 2.663,
      "step": 115
    },
    {
      "epoch": 34.0,
      "eval_accuracy": 0.7974683544303798,
      "eval_loss": 0.468890905380249,
      "eval_runtime": 1.1486,
      "eval_samples_per_second": 68.782,
      "eval_steps_per_second": 2.612,
      "step": 119
    },
    {
      "epoch": 34.285714285714285,
      "grad_norm": 3.1685876846313477,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.2098,
      "step": 120
    },
    {
      "epoch": 34.857142857142854,
      "eval_accuracy": 0.810126582278481,
      "eval_loss": 0.6464852690696716,
      "eval_runtime": 1.1773,
      "eval_samples_per_second": 67.104,
      "eval_steps_per_second": 2.548,
      "step": 122
    },
    {
      "epoch": 36.0,
      "eval_accuracy": 0.7848101265822784,
      "eval_loss": 0.5002541542053223,
      "eval_runtime": 1.1346,
      "eval_samples_per_second": 69.626,
      "eval_steps_per_second": 2.644,
      "step": 126
    },
    {
      "epoch": 36.857142857142854,
      "eval_accuracy": 0.7468354430379747,
      "eval_loss": 0.611255943775177,
      "eval_runtime": 1.139,
      "eval_samples_per_second": 69.361,
      "eval_steps_per_second": 2.634,
      "step": 129
    },
    {
      "epoch": 37.142857142857146,
      "grad_norm": 5.094492435455322,
      "learning_rate": 3.148148148148148e-05,
      "loss": 0.1808,
      "step": 130
    },
    {
      "epoch": 38.0,
      "eval_accuracy": 0.759493670886076,
      "eval_loss": 0.8215884566307068,
      "eval_runtime": 1.1601,
      "eval_samples_per_second": 68.096,
      "eval_steps_per_second": 2.586,
      "step": 133
    },
    {
      "epoch": 38.857142857142854,
      "eval_accuracy": 0.7974683544303798,
      "eval_loss": 0.5603389739990234,
      "eval_runtime": 1.1334,
      "eval_samples_per_second": 69.699,
      "eval_steps_per_second": 2.647,
      "step": 136
    },
    {
      "epoch": 40.0,
      "grad_norm": 3.0100135803222656,
      "learning_rate": 2.962962962962963e-05,
      "loss": 0.1892,
      "step": 140
    },
    {
      "epoch": 40.0,
      "eval_accuracy": 0.7848101265822784,
      "eval_loss": 0.613608181476593,
      "eval_runtime": 1.1451,
      "eval_samples_per_second": 68.989,
      "eval_steps_per_second": 2.62,
      "step": 140
    },
    {
      "epoch": 40.857142857142854,
      "eval_accuracy": 0.7721518987341772,
      "eval_loss": 0.6074010133743286,
      "eval_runtime": 1.181,
      "eval_samples_per_second": 66.893,
      "eval_steps_per_second": 2.54,
      "step": 143
    },
    {
      "epoch": 42.0,
      "eval_accuracy": 0.7848101265822784,
      "eval_loss": 0.6503015756607056,
      "eval_runtime": 1.1394,
      "eval_samples_per_second": 69.334,
      "eval_steps_per_second": 2.633,
      "step": 147
    },
    {
      "epoch": 42.857142857142854,
      "grad_norm": 2.3642420768737793,
      "learning_rate": 2.777777777777778e-05,
      "loss": 0.154,
      "step": 150
    },
    {
      "epoch": 42.857142857142854,
      "eval_accuracy": 0.759493670886076,
      "eval_loss": 0.7923038601875305,
      "eval_runtime": 1.1209,
      "eval_samples_per_second": 70.482,
      "eval_steps_per_second": 2.677,
      "step": 150
    },
    {
      "epoch": 44.0,
      "eval_accuracy": 0.7721518987341772,
      "eval_loss": 0.7790768146514893,
      "eval_runtime": 1.1483,
      "eval_samples_per_second": 68.795,
      "eval_steps_per_second": 2.612,
      "step": 154
    },
    {
      "epoch": 44.857142857142854,
      "eval_accuracy": 0.7721518987341772,
      "eval_loss": 0.7947573661804199,
      "eval_runtime": 1.1487,
      "eval_samples_per_second": 68.772,
      "eval_steps_per_second": 2.612,
      "step": 157
    },
    {
      "epoch": 45.714285714285715,
      "grad_norm": 3.1455414295196533,
      "learning_rate": 2.5925925925925925e-05,
      "loss": 0.1613,
      "step": 160
    },
    {
      "epoch": 46.0,
      "eval_accuracy": 0.7721518987341772,
      "eval_loss": 0.727028489112854,
      "eval_runtime": 1.1467,
      "eval_samples_per_second": 68.895,
      "eval_steps_per_second": 2.616,
      "step": 161
    },
    {
      "epoch": 46.857142857142854,
      "eval_accuracy": 0.7848101265822784,
      "eval_loss": 0.7283490300178528,
      "eval_runtime": 1.1446,
      "eval_samples_per_second": 69.021,
      "eval_steps_per_second": 2.621,
      "step": 164
    },
    {
      "epoch": 48.0,
      "eval_accuracy": 0.7974683544303798,
      "eval_loss": 0.7057384848594666,
      "eval_runtime": 1.1324,
      "eval_samples_per_second": 69.764,
      "eval_steps_per_second": 2.649,
      "step": 168
    },
    {
      "epoch": 48.57142857142857,
      "grad_norm": 3.1506268978118896,
      "learning_rate": 2.4074074074074074e-05,
      "loss": 0.141,
      "step": 170
    },
    {
      "epoch": 48.857142857142854,
      "eval_accuracy": 0.7848101265822784,
      "eval_loss": 0.6691973209381104,
      "eval_runtime": 1.1153,
      "eval_samples_per_second": 70.834,
      "eval_steps_per_second": 2.69,
      "step": 171
    },
    {
      "epoch": 50.0,
      "eval_accuracy": 0.7974683544303798,
      "eval_loss": 0.6390013098716736,
      "eval_runtime": 1.1382,
      "eval_samples_per_second": 69.408,
      "eval_steps_per_second": 2.636,
      "step": 175
    },
    {
      "epoch": 50.857142857142854,
      "eval_accuracy": 0.7974683544303798,
      "eval_loss": 0.6543086171150208,
      "eval_runtime": 1.1235,
      "eval_samples_per_second": 70.319,
      "eval_steps_per_second": 2.67,
      "step": 178
    },
    {
      "epoch": 51.42857142857143,
      "grad_norm": 2.4540371894836426,
      "learning_rate": 2.2222222222222223e-05,
      "loss": 0.1434,
      "step": 180
    },
    {
      "epoch": 52.0,
      "eval_accuracy": 0.7468354430379747,
      "eval_loss": 0.773634135723114,
      "eval_runtime": 1.1471,
      "eval_samples_per_second": 68.869,
      "eval_steps_per_second": 2.615,
      "step": 182
    },
    {
      "epoch": 52.857142857142854,
      "eval_accuracy": 0.7721518987341772,
      "eval_loss": 0.6425884366035461,
      "eval_runtime": 1.1427,
      "eval_samples_per_second": 69.136,
      "eval_steps_per_second": 2.625,
      "step": 185
    },
    {
      "epoch": 54.0,
      "eval_accuracy": 0.7848101265822784,
      "eval_loss": 0.6890805959701538,
      "eval_runtime": 1.1358,
      "eval_samples_per_second": 69.556,
      "eval_steps_per_second": 2.641,
      "step": 189
    },
    {
      "epoch": 54.285714285714285,
      "grad_norm": 3.488741636276245,
      "learning_rate": 2.037037037037037e-05,
      "loss": 0.1583,
      "step": 190
    },
    {
      "epoch": 54.857142857142854,
      "eval_accuracy": 0.7848101265822784,
      "eval_loss": 0.7520738244056702,
      "eval_runtime": 1.1294,
      "eval_samples_per_second": 69.949,
      "eval_steps_per_second": 2.656,
      "step": 192
    },
    {
      "epoch": 56.0,
      "eval_accuracy": 0.810126582278481,
      "eval_loss": 0.6495163440704346,
      "eval_runtime": 1.1339,
      "eval_samples_per_second": 69.67,
      "eval_steps_per_second": 2.646,
      "step": 196
    },
    {
      "epoch": 56.857142857142854,
      "eval_accuracy": 0.7974683544303798,
      "eval_loss": 0.704879641532898,
      "eval_runtime": 1.1387,
      "eval_samples_per_second": 69.379,
      "eval_steps_per_second": 2.635,
      "step": 199
    },
    {
      "epoch": 57.142857142857146,
      "grad_norm": 2.114783525466919,
      "learning_rate": 1.8518518518518518e-05,
      "loss": 0.1418,
      "step": 200
    },
    {
      "epoch": 58.0,
      "eval_accuracy": 0.7848101265822784,
      "eval_loss": 0.7533851861953735,
      "eval_runtime": 1.1446,
      "eval_samples_per_second": 69.022,
      "eval_steps_per_second": 2.621,
      "step": 203
    },
    {
      "epoch": 58.857142857142854,
      "eval_accuracy": 0.7974683544303798,
      "eval_loss": 0.68922358751297,
      "eval_runtime": 1.1411,
      "eval_samples_per_second": 69.23,
      "eval_steps_per_second": 2.629,
      "step": 206
    },
    {
      "epoch": 60.0,
      "grad_norm": 3.576995611190796,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 0.1488,
      "step": 210
    },
    {
      "epoch": 60.0,
      "eval_accuracy": 0.7721518987341772,
      "eval_loss": 0.752753734588623,
      "eval_runtime": 1.1437,
      "eval_samples_per_second": 69.076,
      "eval_steps_per_second": 2.623,
      "step": 210
    },
    {
      "epoch": 60.857142857142854,
      "eval_accuracy": 0.7974683544303798,
      "eval_loss": 0.6920117735862732,
      "eval_runtime": 1.1413,
      "eval_samples_per_second": 69.217,
      "eval_steps_per_second": 2.629,
      "step": 213
    },
    {
      "epoch": 62.0,
      "eval_accuracy": 0.7721518987341772,
      "eval_loss": 0.6767023801803589,
      "eval_runtime": 1.1202,
      "eval_samples_per_second": 70.526,
      "eval_steps_per_second": 2.678,
      "step": 217
    },
    {
      "epoch": 62.857142857142854,
      "grad_norm": 3.9229702949523926,
      "learning_rate": 1.4814814814814815e-05,
      "loss": 0.1481,
      "step": 220
    },
    {
      "epoch": 62.857142857142854,
      "eval_accuracy": 0.7848101265822784,
      "eval_loss": 0.7510101795196533,
      "eval_runtime": 1.1464,
      "eval_samples_per_second": 68.911,
      "eval_steps_per_second": 2.617,
      "step": 220
    },
    {
      "epoch": 64.0,
      "eval_accuracy": 0.7848101265822784,
      "eval_loss": 0.6074531674385071,
      "eval_runtime": 1.1404,
      "eval_samples_per_second": 69.276,
      "eval_steps_per_second": 2.631,
      "step": 224
    },
    {
      "epoch": 64.85714285714286,
      "eval_accuracy": 0.7974683544303798,
      "eval_loss": 0.5857882499694824,
      "eval_runtime": 1.1393,
      "eval_samples_per_second": 69.341,
      "eval_steps_per_second": 2.633,
      "step": 227
    },
    {
      "epoch": 65.71428571428571,
      "grad_norm": 1.5416897535324097,
      "learning_rate": 1.2962962962962962e-05,
      "loss": 0.1014,
      "step": 230
    },
    {
      "epoch": 66.0,
      "eval_accuracy": 0.7848101265822784,
      "eval_loss": 0.666824460029602,
      "eval_runtime": 1.124,
      "eval_samples_per_second": 70.287,
      "eval_steps_per_second": 2.669,
      "step": 231
    },
    {
      "epoch": 66.85714285714286,
      "eval_accuracy": 0.7974683544303798,
      "eval_loss": 0.6127079129219055,
      "eval_runtime": 1.1537,
      "eval_samples_per_second": 68.477,
      "eval_steps_per_second": 2.6,
      "step": 234
    },
    {
      "epoch": 68.0,
      "eval_accuracy": 0.7974683544303798,
      "eval_loss": 0.6295149922370911,
      "eval_runtime": 1.1392,
      "eval_samples_per_second": 69.346,
      "eval_steps_per_second": 2.633,
      "step": 238
    },
    {
      "epoch": 68.57142857142857,
      "grad_norm": 3.536512851715088,
      "learning_rate": 1.1111111111111112e-05,
      "loss": 0.1147,
      "step": 240
    },
    {
      "epoch": 68.85714285714286,
      "eval_accuracy": 0.810126582278481,
      "eval_loss": 0.6722728610038757,
      "eval_runtime": 1.1376,
      "eval_samples_per_second": 69.442,
      "eval_steps_per_second": 2.637,
      "step": 241
    },
    {
      "epoch": 70.0,
      "eval_accuracy": 0.7848101265822784,
      "eval_loss": 0.7166734933853149,
      "eval_runtime": 1.1521,
      "eval_samples_per_second": 68.57,
      "eval_steps_per_second": 2.604,
      "step": 245
    },
    {
      "epoch": 70.85714285714286,
      "eval_accuracy": 0.7974683544303798,
      "eval_loss": 0.6914483904838562,
      "eval_runtime": 1.1378,
      "eval_samples_per_second": 69.429,
      "eval_steps_per_second": 2.637,
      "step": 248
    },
    {
      "epoch": 71.42857142857143,
      "grad_norm": 2.5358872413635254,
      "learning_rate": 9.259259259259259e-06,
      "loss": 0.1289,
      "step": 250
    },
    {
      "epoch": 72.0,
      "eval_accuracy": 0.7974683544303798,
      "eval_loss": 0.6676318645477295,
      "eval_runtime": 1.1313,
      "eval_samples_per_second": 69.83,
      "eval_steps_per_second": 2.652,
      "step": 252
    },
    {
      "epoch": 72.85714285714286,
      "eval_accuracy": 0.810126582278481,
      "eval_loss": 0.6874268651008606,
      "eval_runtime": 1.1388,
      "eval_samples_per_second": 69.372,
      "eval_steps_per_second": 2.634,
      "step": 255
    },
    {
      "epoch": 74.0,
      "eval_accuracy": 0.810126582278481,
      "eval_loss": 0.7485886216163635,
      "eval_runtime": 1.1881,
      "eval_samples_per_second": 66.491,
      "eval_steps_per_second": 2.525,
      "step": 259
    },
    {
      "epoch": 74.28571428571429,
      "grad_norm": 2.7322018146514893,
      "learning_rate": 7.4074074074074075e-06,
      "loss": 0.1084,
      "step": 260
    },
    {
      "epoch": 74.85714285714286,
      "eval_accuracy": 0.810126582278481,
      "eval_loss": 0.7192913293838501,
      "eval_runtime": 1.1432,
      "eval_samples_per_second": 69.106,
      "eval_steps_per_second": 2.624,
      "step": 262
    },
    {
      "epoch": 76.0,
      "eval_accuracy": 0.8354430379746836,
      "eval_loss": 0.7053883671760559,
      "eval_runtime": 1.1734,
      "eval_samples_per_second": 67.328,
      "eval_steps_per_second": 2.557,
      "step": 266
    },
    {
      "epoch": 76.85714285714286,
      "eval_accuracy": 0.8227848101265823,
      "eval_loss": 0.7052078247070312,
      "eval_runtime": 1.1272,
      "eval_samples_per_second": 70.087,
      "eval_steps_per_second": 2.662,
      "step": 269
    },
    {
      "epoch": 77.14285714285714,
      "grad_norm": 2.65423583984375,
      "learning_rate": 5.555555555555556e-06,
      "loss": 0.11,
      "step": 270
    },
    {
      "epoch": 78.0,
      "eval_accuracy": 0.7974683544303798,
      "eval_loss": 0.6885020136833191,
      "eval_runtime": 1.1436,
      "eval_samples_per_second": 69.078,
      "eval_steps_per_second": 2.623,
      "step": 273
    },
    {
      "epoch": 78.85714285714286,
      "eval_accuracy": 0.810126582278481,
      "eval_loss": 0.7162737846374512,
      "eval_runtime": 1.1462,
      "eval_samples_per_second": 68.925,
      "eval_steps_per_second": 2.617,
      "step": 276
    },
    {
      "epoch": 80.0,
      "grad_norm": 3.0879228115081787,
      "learning_rate": 3.7037037037037037e-06,
      "loss": 0.1144,
      "step": 280
    },
    {
      "epoch": 80.0,
      "eval_accuracy": 0.7974683544303798,
      "eval_loss": 0.6901566982269287,
      "eval_runtime": 1.1477,
      "eval_samples_per_second": 68.833,
      "eval_steps_per_second": 2.614,
      "step": 280
    },
    {
      "epoch": 80.85714285714286,
      "eval_accuracy": 0.7974683544303798,
      "eval_loss": 0.6886458396911621,
      "eval_runtime": 1.1476,
      "eval_samples_per_second": 68.837,
      "eval_steps_per_second": 2.614,
      "step": 283
    },
    {
      "epoch": 82.0,
      "eval_accuracy": 0.8227848101265823,
      "eval_loss": 0.7062325477600098,
      "eval_runtime": 1.1538,
      "eval_samples_per_second": 68.471,
      "eval_steps_per_second": 2.6,
      "step": 287
    },
    {
      "epoch": 82.85714285714286,
      "grad_norm": 2.7569634914398193,
      "learning_rate": 1.8518518518518519e-06,
      "loss": 0.1026,
      "step": 290
    },
    {
      "epoch": 82.85714285714286,
      "eval_accuracy": 0.810126582278481,
      "eval_loss": 0.7195845246315002,
      "eval_runtime": 1.1374,
      "eval_samples_per_second": 69.459,
      "eval_steps_per_second": 2.638,
      "step": 290
    },
    {
      "epoch": 84.0,
      "eval_accuracy": 0.7974683544303798,
      "eval_loss": 0.7332103252410889,
      "eval_runtime": 1.1551,
      "eval_samples_per_second": 68.392,
      "eval_steps_per_second": 2.597,
      "step": 294
    },
    {
      "epoch": 84.85714285714286,
      "eval_accuracy": 0.7974683544303798,
      "eval_loss": 0.7225062847137451,
      "eval_runtime": 1.1807,
      "eval_samples_per_second": 66.909,
      "eval_steps_per_second": 2.541,
      "step": 297
    },
    {
      "epoch": 85.71428571428571,
      "grad_norm": 2.9502005577087402,
      "learning_rate": 0.0,
      "loss": 0.1143,
      "step": 300
    },
    {
      "epoch": 85.71428571428571,
      "eval_accuracy": 0.7974683544303798,
      "eval_loss": 0.7162460684776306,
      "eval_runtime": 1.2487,
      "eval_samples_per_second": 63.268,
      "eval_steps_per_second": 2.403,
      "step": 300
    },
    {
      "epoch": 85.71428571428571,
      "step": 300,
      "total_flos": 2.9362240500074496e+18,
      "train_loss": 0.25938483238220217,
      "train_runtime": 1661.2834,
      "train_samples_per_second": 26.606,
      "train_steps_per_second": 0.181
    },
    {
      "epoch": 85.71428571428571,
      "eval_accuracy": 0.8354430379746836,
      "eval_loss": 0.4563353955745697,
      "eval_runtime": 1.1716,
      "eval_samples_per_second": 67.429,
      "eval_steps_per_second": 2.561,
      "step": 300
    }
  ],
  "logging_steps": 10,
  "max_steps": 300,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 100,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.9362240500074496e+18,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}