{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.757986669731096,
  "eval_steps": 500,
  "global_step": 3000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09193288899103655,
      "grad_norm": 0.09970999509096146,
      "learning_rate": 0.00019988036932954271,
      "loss": 0.7687,
      "step": 100
    },
    {
      "epoch": 0.1838657779820731,
      "grad_norm": 0.11836739629507065,
      "learning_rate": 0.00019892504066072438,
      "loss": 0.5693,
      "step": 200
    },
    {
      "epoch": 0.2757986669731096,
      "grad_norm": 0.1443864107131958,
      "learning_rate": 0.0001970235207829469,
      "loss": 0.5042,
      "step": 300
    },
    {
      "epoch": 0.3677315559641462,
      "grad_norm": 0.1083601862192154,
      "learning_rate": 0.0001941939972186009,
      "loss": 0.4697,
      "step": 400
    },
    {
      "epoch": 0.4596644449551827,
      "grad_norm": 0.10843081027269363,
      "learning_rate": 0.00019046353359388504,
      "loss": 0.4447,
      "step": 500
    },
    {
      "epoch": 0.5515973339462192,
      "grad_norm": 0.11222769320011139,
      "learning_rate": 0.00018586781078255458,
      "loss": 0.4265,
      "step": 600
    },
    {
      "epoch": 0.6435302229372558,
      "grad_norm": 0.11036638170480728,
      "learning_rate": 0.00018045078562803203,
      "loss": 0.4062,
      "step": 700
    },
    {
      "epoch": 0.7354631119282924,
      "grad_norm": 0.1659434586763382,
      "learning_rate": 0.0001742642705081106,
      "loss": 0.3864,
      "step": 800
    },
    {
      "epoch": 0.8273960009193289,
      "grad_norm": 0.14999587833881378,
      "learning_rate": 0.00016736743776359978,
      "loss": 0.3771,
      "step": 900
    },
    {
      "epoch": 0.9193288899103654,
      "grad_norm": 0.10731250792741776,
      "learning_rate": 0.00015982625373091875,
      "loss": 0.3602,
      "step": 1000
    },
    {
      "epoch": 1.011261778901402,
      "grad_norm": 0.15278048813343048,
      "learning_rate": 0.00015171284779196334,
      "loss": 0.357,
      "step": 1100
    },
    {
      "epoch": 1.1031946678924385,
      "grad_norm": 0.08481089770793915,
      "learning_rate": 0.00014310482247611208,
      "loss": 0.3405,
      "step": 1200
    },
    {
      "epoch": 1.1951275568834752,
      "grad_norm": 0.09210831671953201,
      "learning_rate": 0.00013408451121306046,
      "loss": 0.3361,
      "step": 1300
    },
    {
      "epoch": 1.2870604458745116,
      "grad_norm": 0.09653517603874207,
      "learning_rate": 0.0001247381908358749,
      "loss": 0.3308,
      "step": 1400
    },
    {
      "epoch": 1.378993334865548,
      "grad_norm": 0.11181017011404037,
      "learning_rate": 0.00011515525636646231,
      "loss": 0.3277,
      "step": 1500
    },
    {
      "epoch": 1.4709262238565848,
      "grad_norm": 0.08782528340816498,
      "learning_rate": 0.00010542736597640826,
      "loss": 0.3305,
      "step": 1600
    },
    {
      "epoch": 1.5628591128476212,
      "grad_norm": 0.0979943498969078,
      "learning_rate": 9.564756430140164e-05,
      "loss": 0.3246,
      "step": 1700
    },
    {
      "epoch": 1.6547920018386577,
      "grad_norm": 0.08940441906452179,
      "learning_rate": 8.590939249450595e-05,
      "loss": 0.3229,
      "step": 1800
    },
    {
      "epoch": 1.7467248908296944,
      "grad_norm": 0.08743057399988174,
      "learning_rate": 7.630599353037633e-05,
      "loss": 0.3235,
      "step": 1900
    },
    {
      "epoch": 1.8386577798207309,
      "grad_norm": 0.12237745523452759,
      "learning_rate": 6.692922131794517e-05,
      "loss": 0.3219,
      "step": 2000
    },
    {
      "epoch": 1.9305906688117673,
      "grad_norm": 0.08483380824327469,
      "learning_rate": 5.7868762142672204e-05,
      "loss": 0.3206,
      "step": 2100
    },
    {
      "epoch": 2.022523557802804,
      "grad_norm": 0.08572427928447723,
      "learning_rate": 4.9211276841525744e-05,
      "loss": 0.3117,
      "step": 2200
    },
    {
      "epoch": 2.1144564467938407,
      "grad_norm": 0.09082117676734924,
      "learning_rate": 4.103957191555876e-05,
      "loss": 0.3008,
      "step": 2300
    },
    {
      "epoch": 2.206389335784877,
      "grad_norm": 0.09148624539375305,
      "learning_rate": 3.343180750816377e-05,
      "loss": 0.3023,
      "step": 2400
    },
    {
      "epoch": 2.2983222247759136,
      "grad_norm": 0.09966079890727997,
      "learning_rate": 2.6460749824479912e-05,
      "loss": 0.3044,
      "step": 2500
    },
    {
      "epoch": 2.3902551137669503,
      "grad_norm": 0.09154877066612244,
      "learning_rate": 2.019307514235741e-05,
      "loss": 0.3015,
      "step": 2600
    },
    {
      "epoch": 2.4821880027579866,
      "grad_norm": 0.08882308751344681,
      "learning_rate": 1.4688732071827094e-05,
      "loss": 0.3011,
      "step": 2700
    },
    {
      "epoch": 2.5741208917490233,
      "grad_norm": 0.09436959028244019,
      "learning_rate": 1.0000368162888795e-05,
      "loss": 0.2982,
      "step": 2800
    },
    {
      "epoch": 2.66605378074006,
      "grad_norm": 0.10637196153402328,
      "learning_rate": 6.1728263459614796e-06,
      "loss": 0.2993,
      "step": 2900
    },
    {
      "epoch": 2.757986669731096,
      "grad_norm": 0.10626891255378723,
      "learning_rate": 3.2427160214043793e-06,
      "loss": 0.304,
      "step": 3000
    }
  ],
  "logging_steps": 100,
  "max_steps": 3261,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 8.528579886191739e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}