{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.4903129657228018,
  "eval_steps": 500,
  "global_step": 1000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.029806259314456036,
      "grad_norm": 0.22671940922737122,
      "learning_rate": 8e-05,
      "loss": 1.3506,
      "step": 20
    },
    {
      "epoch": 0.05961251862891207,
      "grad_norm": 0.05189573019742966,
      "learning_rate": 0.00016,
      "loss": 0.8372,
      "step": 40
    },
    {
      "epoch": 0.08941877794336811,
      "grad_norm": 0.049646440893411636,
      "learning_rate": 0.0001999871938117722,
      "loss": 0.7053,
      "step": 60
    },
    {
      "epoch": 0.11922503725782414,
      "grad_norm": 0.04605806618928909,
      "learning_rate": 0.0001998847639849245,
      "loss": 0.5957,
      "step": 80
    },
    {
      "epoch": 0.14903129657228018,
      "grad_norm": 0.04606040194630623,
      "learning_rate": 0.0001996800092633612,
      "loss": 0.5457,
      "step": 100
    },
    {
      "epoch": 0.17883755588673622,
      "grad_norm": 0.03791499882936478,
      "learning_rate": 0.0001993731394038509,
      "loss": 0.5301,
      "step": 120
    },
    {
      "epoch": 0.20864381520119224,
      "grad_norm": 0.047940950840711594,
      "learning_rate": 0.0001989644687729177,
      "loss": 0.5223,
      "step": 140
    },
    {
      "epoch": 0.23845007451564829,
      "grad_norm": 0.0519738644361496,
      "learning_rate": 0.0001984544160247949,
      "loss": 0.5117,
      "step": 160
    },
    {
      "epoch": 0.26825633383010433,
      "grad_norm": 0.04569224268198013,
      "learning_rate": 0.00019784350367254322,
      "loss": 0.5181,
      "step": 180
    },
    {
      "epoch": 0.29806259314456035,
      "grad_norm": 0.045780543237924576,
      "learning_rate": 0.0001971323575527731,
      "loss": 0.5168,
      "step": 200
    },
    {
      "epoch": 0.32786885245901637,
      "grad_norm": 0.046496499329805374,
      "learning_rate": 0.0001963217061845192,
      "loss": 0.5051,
      "step": 220
    },
    {
      "epoch": 0.35767511177347244,
      "grad_norm": 0.04850991442799568,
      "learning_rate": 0.00019541238002292394,
      "loss": 0.5083,
      "step": 240
    },
    {
      "epoch": 0.38748137108792846,
      "grad_norm": 0.046593230217695236,
      "learning_rate": 0.00019440531060849504,
      "loss": 0.5066,
      "step": 260
    },
    {
      "epoch": 0.4172876304023845,
      "grad_norm": 0.050816215574741364,
      "learning_rate": 0.00019330152961280765,
      "loss": 0.5021,
      "step": 280
    },
    {
      "epoch": 0.44709388971684055,
      "grad_norm": 0.0533340722322464,
      "learning_rate": 0.00019210216778162994,
      "loss": 0.5017,
      "step": 300
    },
    {
      "epoch": 0.47690014903129657,
      "grad_norm": 0.10607749223709106,
      "learning_rate": 0.00019080845377655352,
      "loss": 0.4996,
      "step": 320
    },
    {
      "epoch": 0.5067064083457526,
      "grad_norm": 0.054169464856386185,
      "learning_rate": 0.00018942171291631653,
      "loss": 0.5014,
      "step": 340
    },
    {
      "epoch": 0.5365126676602087,
      "grad_norm": 0.047340892255306244,
      "learning_rate": 0.000187943365819108,
      "loss": 0.4988,
      "step": 360
    },
    {
      "epoch": 0.5663189269746647,
      "grad_norm": 0.04948943480849266,
      "learning_rate": 0.00018637492694724466,
      "loss": 0.4975,
      "step": 380
    },
    {
      "epoch": 0.5961251862891207,
      "grad_norm": 0.04684118553996086,
      "learning_rate": 0.00018471800305571129,
      "loss": 0.4961,
      "step": 400
    },
    {
      "epoch": 0.6259314456035767,
      "grad_norm": 0.04512176662683487,
      "learning_rate": 0.00018297429154615338,
      "loss": 0.4956,
      "step": 420
    },
    {
      "epoch": 0.6557377049180327,
      "grad_norm": 0.05711393430829048,
      "learning_rate": 0.00018114557872800905,
      "loss": 0.4944,
      "step": 440
    },
    {
      "epoch": 0.6855439642324889,
      "grad_norm": 0.05339927598834038,
      "learning_rate": 0.000179233737988561,
      "loss": 0.4922,
      "step": 460
    },
    {
      "epoch": 0.7153502235469449,
      "grad_norm": 0.04622140899300575,
      "learning_rate": 0.00017724072787378333,
      "loss": 0.4846,
      "step": 480
    },
    {
      "epoch": 0.7451564828614009,
      "grad_norm": 0.0447109080851078,
      "learning_rate": 0.00017516859008194938,
      "loss": 0.4855,
      "step": 500
    },
    {
      "epoch": 0.7749627421758569,
      "grad_norm": 0.0435018390417099,
      "learning_rate": 0.00017301944737205588,
      "loss": 0.486,
      "step": 520
    },
    {
      "epoch": 0.8047690014903129,
      "grad_norm": 0.04844864085316658,
      "learning_rate": 0.0001707955013892061,
      "loss": 0.4911,
      "step": 540
    },
    {
      "epoch": 0.834575260804769,
      "grad_norm": 0.05155207961797714,
      "learning_rate": 0.00016849903040917975,
      "loss": 0.4861,
      "step": 560
    },
    {
      "epoch": 0.8643815201192251,
      "grad_norm": 0.04522501304745674,
      "learning_rate": 0.00016613238700450013,
      "loss": 0.4852,
      "step": 580
    },
    {
      "epoch": 0.8941877794336811,
      "grad_norm": 0.047208670526742935,
      "learning_rate": 0.00016369799563438958,
      "loss": 0.4844,
      "step": 600
    },
    {
      "epoch": 0.9239940387481371,
      "grad_norm": 0.054809391498565674,
      "learning_rate": 0.00016119835016108197,
      "loss": 0.4815,
      "step": 620
    },
    {
      "epoch": 0.9538002980625931,
      "grad_norm": 0.04929815232753754,
      "learning_rate": 0.00015863601129503688,
      "loss": 0.4823,
      "step": 640
    },
    {
      "epoch": 0.9836065573770492,
      "grad_norm": 0.05650775134563446,
      "learning_rate": 0.00015601360397167214,
      "loss": 0.481,
      "step": 660
    },
    {
      "epoch": 1.0134128166915053,
      "grad_norm": 0.04741150885820389,
      "learning_rate": 0.00015333381466230294,
      "loss": 0.488,
      "step": 680
    },
    {
      "epoch": 1.0432190760059612,
      "grad_norm": 0.057174187153577805,
      "learning_rate": 0.00015059938862204127,
      "loss": 0.4821,
      "step": 700
    },
    {
      "epoch": 1.0730253353204173,
      "grad_norm": 0.054773855954408646,
      "learning_rate": 0.0001478131270774758,
      "loss": 0.4776,
      "step": 720
    },
    {
      "epoch": 1.1028315946348732,
      "grad_norm": 0.05249800160527229,
      "learning_rate": 0.0001449778843570128,
      "loss": 0.4782,
      "step": 740
    },
    {
      "epoch": 1.1326378539493294,
      "grad_norm": 0.049587223678827286,
      "learning_rate": 0.00014209656496681812,
      "loss": 0.4819,
      "step": 760
    },
    {
      "epoch": 1.1624441132637853,
      "grad_norm": 0.048591841012239456,
      "learning_rate": 0.0001391721206153554,
      "loss": 0.4796,
      "step": 780
    },
    {
      "epoch": 1.1922503725782414,
      "grad_norm": 0.05344521254301071,
      "learning_rate": 0.000136207547189569,
      "loss": 0.472,
      "step": 800
    },
    {
      "epoch": 1.2220566318926975,
      "grad_norm": 0.052742790430784225,
      "learning_rate": 0.0001332058816858092,
      "loss": 0.476,
      "step": 820
    },
    {
      "epoch": 1.2518628912071534,
      "grad_norm": 0.05483981594443321,
      "learning_rate": 0.00013017019909864364,
      "loss": 0.4755,
      "step": 840
    },
    {
      "epoch": 1.2816691505216096,
      "grad_norm": 0.050890855491161346,
      "learning_rate": 0.00012710360927074233,
      "loss": 0.4719,
      "step": 860
    },
    {
      "epoch": 1.3114754098360657,
      "grad_norm": 0.05531829223036766,
      "learning_rate": 0.0001240092537070631,
      "loss": 0.4745,
      "step": 880
    },
    {
      "epoch": 1.3412816691505216,
      "grad_norm": 0.05655696988105774,
      "learning_rate": 0.00012089030235660155,
      "loss": 0.475,
      "step": 900
    },
    {
      "epoch": 1.3710879284649775,
      "grad_norm": 0.059185780584812164,
      "learning_rate": 0.00011774995036500181,
      "loss": 0.4714,
      "step": 920
    },
    {
      "epoch": 1.4008941877794336,
      "grad_norm": 0.05000292509794235,
      "learning_rate": 0.00011459141480135518,
      "loss": 0.4774,
      "step": 940
    },
    {
      "epoch": 1.4307004470938898,
      "grad_norm": 0.05054055526852608,
      "learning_rate": 0.00011141793136253986,
      "loss": 0.4666,
      "step": 960
    },
    {
      "epoch": 1.4605067064083457,
      "grad_norm": 0.04777640849351883,
      "learning_rate": 0.00010823275105847772,
      "loss": 0.4711,
      "step": 980
    },
    {
      "epoch": 1.4903129657228018,
      "grad_norm": 0.05582300201058388,
      "learning_rate": 0.00010503913688170396,
      "loss": 0.4668,
      "step": 1000
    }
  ],
  "logging_steps": 20,
  "max_steps": 2013,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.6835684982666035e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}