{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.448383733055266,
  "eval_steps": 500,
  "global_step": 430,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 1.0416666666666667e-06,
      "loss": 0.7671,
      "step": 1
    },
    {
      "epoch": 0.02,
      "learning_rate": 1.0416666666666668e-05,
      "loss": 0.6676,
      "step": 10
    },
    {
      "epoch": 0.04,
      "learning_rate": 2.0833333333333336e-05,
      "loss": 0.5586,
      "step": 20
    },
    {
      "epoch": 0.06,
      "learning_rate": 3.125e-05,
      "loss": 0.4224,
      "step": 30
    },
    {
      "epoch": 0.08,
      "learning_rate": 4.166666666666667e-05,
      "loss": 0.3747,
      "step": 40
    },
    {
      "epoch": 0.1,
      "learning_rate": 4.999940408373438e-05,
      "loss": 0.3632,
      "step": 50
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.997854999726187e-05,
      "loss": 0.3615,
      "step": 60
    },
    {
      "epoch": 0.15,
      "learning_rate": 4.99279285007165e-05,
      "loss": 0.346,
      "step": 70
    },
    {
      "epoch": 0.17,
      "learning_rate": 4.984759992069235e-05,
      "loss": 0.3425,
      "step": 80
    },
    {
      "epoch": 0.19,
      "learning_rate": 4.973765998627628e-05,
      "loss": 0.3341,
      "step": 90
    },
    {
      "epoch": 0.21,
      "learning_rate": 4.959823971496574e-05,
      "loss": 0.3289,
      "step": 100
    },
    {
      "epoch": 0.23,
      "learning_rate": 4.942950525653282e-05,
      "loss": 0.3351,
      "step": 110
    },
    {
      "epoch": 0.25,
      "learning_rate": 4.923165769502033e-05,
      "loss": 0.3279,
      "step": 120
    },
    {
      "epoch": 0.27,
      "learning_rate": 4.900493280910623e-05,
      "loss": 0.3255,
      "step": 130
    },
    {
      "epoch": 0.29,
      "learning_rate": 4.874960079112168e-05,
      "loss": 0.3365,
      "step": 140
    },
    {
      "epoch": 0.31,
      "learning_rate": 4.846596592505776e-05,
      "loss": 0.3486,
      "step": 150
    },
    {
      "epoch": 0.33,
      "learning_rate": 4.815436622394441e-05,
      "loss": 0.3294,
      "step": 160
    },
    {
      "epoch": 0.35,
      "learning_rate": 4.781517302703405e-05,
      "loss": 0.3316,
      "step": 170
    },
    {
      "epoch": 0.38,
      "learning_rate": 4.744879055726944e-05,
      "loss": 0.3176,
      "step": 180
    },
    {
      "epoch": 0.4,
      "learning_rate": 4.7055655439563656e-05,
      "loss": 0.327,
      "step": 190
    },
    {
      "epoch": 0.42,
      "learning_rate": 4.6636236180465876e-05,
      "loss": 0.3203,
      "step": 200
    },
    {
      "epoch": 0.44,
      "learning_rate": 4.619103260983328e-05,
      "loss": 0.3225,
      "step": 210
    },
    {
      "epoch": 1.01,
      "learning_rate": 4.572057528517441e-05,
      "loss": 0.3125,
      "step": 220
    },
    {
      "epoch": 1.03,
      "learning_rate": 4.522542485937369e-05,
      "loss": 0.3141,
      "step": 230
    },
    {
      "epoch": 1.05,
      "learning_rate": 4.4706171412550815e-05,
      "loss": 0.2982,
      "step": 240
    },
    {
      "epoch": 1.07,
      "learning_rate": 4.416343374885116e-05,
      "loss": 0.2867,
      "step": 250
    },
    {
      "epoch": 1.09,
      "learning_rate": 4.359785865900513e-05,
      "loss": 0.2975,
      "step": 260
    },
    {
      "epoch": 1.11,
      "learning_rate": 4.301012014953544e-05,
      "loss": 0.3021,
      "step": 270
    },
    {
      "epoch": 1.14,
      "learning_rate": 4.240091863953084e-05,
      "loss": 0.3082,
      "step": 280
    },
    {
      "epoch": 1.16,
      "learning_rate": 4.177098012594341e-05,
      "loss": 0.306,
      "step": 290
    },
    {
      "epoch": 1.18,
      "learning_rate": 4.1121055318404264e-05,
      "loss": 0.2859,
      "step": 300
    },
    {
      "epoch": 1.2,
      "learning_rate": 4.045191874458876e-05,
      "loss": 0.2982,
      "step": 310
    },
    {
      "epoch": 1.22,
      "learning_rate": 3.976436782719713e-05,
      "loss": 0.2956,
      "step": 320
    },
    {
      "epoch": 1.24,
      "learning_rate": 3.905922193365089e-05,
      "loss": 0.2958,
      "step": 330
    },
    {
      "epoch": 1.26,
      "learning_rate": 3.8337321399637125e-05,
      "loss": 0.2952,
      "step": 340
    },
    {
      "epoch": 1.28,
      "learning_rate": 3.75995265276646e-05,
      "loss": 0.2918,
      "step": 350
    },
    {
      "epoch": 1.3,
      "learning_rate": 3.6846716561824965e-05,
      "loss": 0.2909,
      "step": 360
    },
    {
      "epoch": 1.32,
      "learning_rate": 3.6079788639981036e-05,
      "loss": 0.2927,
      "step": 370
    },
    {
      "epoch": 1.34,
      "learning_rate": 3.529965672463056e-05,
      "loss": 0.2889,
      "step": 380
    },
    {
      "epoch": 1.36,
      "learning_rate": 3.450725051371987e-05,
      "loss": 0.2752,
      "step": 390
    },
    {
      "epoch": 1.39,
      "learning_rate": 3.370351433270529e-05,
      "loss": 0.2883,
      "step": 400
    },
    {
      "epoch": 1.41,
      "learning_rate": 3.288940600918266e-05,
      "loss": 0.2761,
      "step": 410
    },
    {
      "epoch": 1.43,
      "learning_rate": 3.2065895731425995e-05,
      "loss": 0.2824,
      "step": 420
    },
    {
      "epoch": 1.45,
      "learning_rate": 3.1233964892195884e-05,
      "loss": 0.2919,
      "step": 430
    },
    {
      "epoch": 1.45,
      "step": 430,
      "total_flos": 3.022612632268964e+17,
      "train_loss": 0.33035796076752416,
      "train_runtime": 4854.2663,
      "train_samples_per_second": 3.158,
      "train_steps_per_second": 0.197
    }
  ],
  "logging_steps": 10,
  "max_steps": 958,
  "num_train_epochs": 2,
  "save_steps": 500,
  "total_flos": 3.022612632268964e+17,
  "trial_name": null,
  "trial_params": null
}