{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.210857354276681,
"eval_steps": 500,
"global_step": 9000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 4.9997567688496474e-05,
"loss": 2.9132,
"step": 100
},
{
"epoch": 0.03,
"learning_rate": 4.9990172715142793e-05,
"loss": 0.0721,
"step": 200
},
{
"epoch": 0.04,
"learning_rate": 4.997781629993153e-05,
"loss": 0.0757,
"step": 300
},
{
"epoch": 0.05,
"learning_rate": 4.9960500896052476e-05,
"loss": 0.0599,
"step": 400
},
{
"epoch": 0.07,
"learning_rate": 4.9938477167054135e-05,
"loss": 0.0353,
"step": 500
},
{
"epoch": 0.08,
"learning_rate": 4.9911304569050045e-05,
"loss": 0.0256,
"step": 600
},
{
"epoch": 0.09,
"learning_rate": 4.987918618733232e-05,
"loss": 0.0173,
"step": 700
},
{
"epoch": 0.11,
"learning_rate": 4.98421283985469e-05,
"loss": 0.0276,
"step": 800
},
{
"epoch": 0.12,
"learning_rate": 4.9800138559988466e-05,
"loss": 0.0225,
"step": 900
},
{
"epoch": 0.13,
"learning_rate": 4.97532250081397e-05,
"loss": 0.026,
"step": 1000
},
{
"epoch": 0.15,
"learning_rate": 4.970139705701628e-05,
"loss": 0.0271,
"step": 1100
},
{
"epoch": 0.16,
"learning_rate": 4.9644664996317616e-05,
"loss": 0.0231,
"step": 1200
},
{
"epoch": 0.17,
"learning_rate": 4.958304008938407e-05,
"loss": 0.0196,
"step": 1300
},
{
"epoch": 0.19,
"learning_rate": 4.951653457096072e-05,
"loss": 0.0124,
"step": 1400
},
{
"epoch": 0.2,
"learning_rate": 4.944516164476834e-05,
"loss": 0.0159,
"step": 1500
},
{
"epoch": 0.22,
"learning_rate": 4.9368935480882034e-05,
"loss": 0.0144,
"step": 1600
},
{
"epoch": 0.23,
"learning_rate": 4.9287871212917866e-05,
"loss": 0.0182,
"step": 1700
},
{
"epoch": 0.24,
"learning_rate": 4.9201984935028426e-05,
"loss": 0.018,
"step": 1800
},
{
"epoch": 0.26,
"learning_rate": 4.911129369870746e-05,
"loss": 0.0156,
"step": 1900
},
{
"epoch": 0.27,
"learning_rate": 4.90158155094046e-05,
"loss": 0.0146,
"step": 2000
},
{
"epoch": 0.28,
"learning_rate": 4.8915569322950615e-05,
"loss": 0.0196,
"step": 2100
},
{
"epoch": 0.3,
"learning_rate": 4.8810575041794e-05,
"loss": 0.02,
"step": 2200
},
{
"epoch": 0.31,
"learning_rate": 4.8700853511049656e-05,
"loss": 0.0143,
"step": 2300
},
{
"epoch": 0.32,
"learning_rate": 4.858642651436035e-05,
"loss": 0.0111,
"step": 2400
},
{
"epoch": 0.34,
"learning_rate": 4.846731676957191e-05,
"loss": 0.0179,
"step": 2500
},
{
"epoch": 0.35,
"learning_rate": 4.834354792422293e-05,
"loss": 0.0133,
"step": 2600
},
{
"epoch": 0.36,
"learning_rate": 4.821514455084985e-05,
"loss": 0.0112,
"step": 2700
},
{
"epoch": 0.38,
"learning_rate": 4.8082132142108465e-05,
"loss": 0.0131,
"step": 2800
},
{
"epoch": 0.39,
"learning_rate": 4.794453710571272e-05,
"loss": 0.0134,
"step": 2900
},
{
"epoch": 0.4,
"learning_rate": 4.780238675919182e-05,
"loss": 0.0084,
"step": 3000
},
{
"epoch": 0.42,
"learning_rate": 4.765570932446672e-05,
"loss": 0.0069,
"step": 3100
},
{
"epoch": 0.43,
"learning_rate": 4.75045339222471e-05,
"loss": 0.0186,
"step": 3200
},
{
"epoch": 0.44,
"learning_rate": 4.734889056624983e-05,
"loss": 0.0138,
"step": 3300
},
{
"epoch": 0.46,
"learning_rate": 4.718881015724017e-05,
"loss": 0.012,
"step": 3400
},
{
"epoch": 0.47,
"learning_rate": 4.702432447689692e-05,
"loss": 0.0121,
"step": 3500
},
{
"epoch": 0.48,
"learning_rate": 4.6855466181502544e-05,
"loss": 0.0104,
"step": 3600
},
{
"epoch": 0.5,
"learning_rate": 4.66822687954598e-05,
"loss": 0.014,
"step": 3700
},
{
"epoch": 0.51,
"learning_rate": 4.65047667046359e-05,
"loss": 0.0085,
"step": 3800
},
{
"epoch": 0.52,
"learning_rate": 4.632299514953571e-05,
"loss": 0.0111,
"step": 3900
},
{
"epoch": 0.54,
"learning_rate": 4.613699021830524e-05,
"loss": 0.0122,
"step": 4000
},
{
"epoch": 0.55,
"learning_rate": 4.59467888395669e-05,
"loss": 0.0082,
"step": 4100
},
{
"epoch": 0.57,
"learning_rate": 4.575242877508777e-05,
"loss": 0.0087,
"step": 4200
},
{
"epoch": 0.58,
"learning_rate": 4.5553948612282607e-05,
"loss": 0.0115,
"step": 4300
},
{
"epoch": 0.59,
"learning_rate": 4.5351387756552846e-05,
"loss": 0.0069,
"step": 4400
},
{
"epoch": 0.61,
"learning_rate": 4.51447864234632e-05,
"loss": 0.0099,
"step": 4500
},
{
"epoch": 0.62,
"learning_rate": 4.4934185630757484e-05,
"loss": 0.0087,
"step": 4600
},
{
"epoch": 0.63,
"learning_rate": 4.4719627190215064e-05,
"loss": 0.0096,
"step": 4700
},
{
"epoch": 0.65,
"learning_rate": 4.450115369934976e-05,
"loss": 0.0122,
"step": 4800
},
{
"epoch": 0.66,
"learning_rate": 4.427880853295274e-05,
"loss": 0.0076,
"step": 4900
},
{
"epoch": 0.67,
"learning_rate": 4.4052635834481025e-05,
"loss": 0.0071,
"step": 5000
},
{
"epoch": 0.69,
"learning_rate": 4.3822680507293455e-05,
"loss": 0.0105,
"step": 5100
},
{
"epoch": 0.7,
"learning_rate": 4.358898820573581e-05,
"loss": 0.0108,
"step": 5200
},
{
"epoch": 0.71,
"learning_rate": 4.3351605326076724e-05,
"loss": 0.0063,
"step": 5300
},
{
"epoch": 0.73,
"learning_rate": 4.3110578997296416e-05,
"loss": 0.0147,
"step": 5400
},
{
"epoch": 0.74,
"learning_rate": 4.286595707172986e-05,
"loss": 0.0135,
"step": 5500
},
{
"epoch": 0.75,
"learning_rate": 4.261778811556646e-05,
"loss": 0.0105,
"step": 5600
},
{
"epoch": 0.77,
"learning_rate": 4.236612139920786e-05,
"loss": 0.0131,
"step": 5700
},
{
"epoch": 0.78,
"learning_rate": 4.2111006887486035e-05,
"loss": 0.0085,
"step": 5800
},
{
"epoch": 0.79,
"learning_rate": 4.185249522974346e-05,
"loss": 0.0052,
"step": 5900
},
{
"epoch": 0.81,
"learning_rate": 4.159063774977748e-05,
"loss": 0.008,
"step": 6000
},
{
"epoch": 0.82,
"learning_rate": 4.1325486435650625e-05,
"loss": 0.011,
"step": 6100
},
{
"epoch": 0.83,
"learning_rate": 4.105709392936914e-05,
"loss": 0.0074,
"step": 6200
},
{
"epoch": 0.85,
"learning_rate": 4.0785513516431705e-05,
"loss": 0.0147,
"step": 6300
},
{
"epoch": 0.86,
"learning_rate": 4.051079911525031e-05,
"loss": 0.0049,
"step": 6400
},
{
"epoch": 0.87,
"learning_rate": 4.023300526644557e-05,
"loss": 0.0075,
"step": 6500
},
{
"epoch": 0.89,
"learning_rate": 3.995501009115527e-05,
"loss": 0.0114,
"step": 6600
},
{
"epoch": 0.9,
"learning_rate": 3.967125281105033e-05,
"loss": 0.0089,
"step": 6700
},
{
"epoch": 0.91,
"learning_rate": 3.93845827632495e-05,
"loss": 0.0066,
"step": 6800
},
{
"epoch": 0.93,
"learning_rate": 3.909505686199625e-05,
"loss": 0.0079,
"step": 6900
},
{
"epoch": 0.94,
"learning_rate": 3.880273258852296e-05,
"loss": 0.008,
"step": 7000
},
{
"epoch": 0.96,
"learning_rate": 3.850766797963886e-05,
"loss": 0.0068,
"step": 7100
},
{
"epoch": 0.97,
"learning_rate": 3.8209921616207645e-05,
"loss": 0.0069,
"step": 7200
},
{
"epoch": 0.98,
"learning_rate": 3.790955261151704e-05,
"loss": 0.0088,
"step": 7300
},
{
"epoch": 1.0,
"learning_rate": 3.7606620599542756e-05,
"loss": 0.0078,
"step": 7400
},
{
"epoch": 1.01,
"learning_rate": 3.730118572310899e-05,
"loss": 0.008,
"step": 7500
},
{
"epoch": 1.02,
"learning_rate": 3.699330862194794e-05,
"loss": 0.0057,
"step": 7600
},
{
"epoch": 1.04,
"learning_rate": 3.668305042066061e-05,
"loss": 0.0046,
"step": 7700
},
{
"epoch": 1.05,
"learning_rate": 3.637047271658145e-05,
"loss": 0.0059,
"step": 7800
},
{
"epoch": 1.06,
"learning_rate": 3.605563756754904e-05,
"loss": 0.0046,
"step": 7900
},
{
"epoch": 1.08,
"learning_rate": 3.573860747958544e-05,
"loss": 0.0085,
"step": 8000
},
{
"epoch": 1.09,
"learning_rate": 3.541944539448648e-05,
"loss": 0.0064,
"step": 8100
},
{
"epoch": 1.1,
"learning_rate": 3.509821467732553e-05,
"loss": 0.0064,
"step": 8200
},
{
"epoch": 1.12,
"learning_rate": 3.477822117325554e-05,
"loss": 0.0061,
"step": 8300
},
{
"epoch": 1.13,
"learning_rate": 3.445306400520726e-05,
"loss": 0.0087,
"step": 8400
},
{
"epoch": 1.14,
"learning_rate": 3.4126030066319e-05,
"loss": 0.0042,
"step": 8500
},
{
"epoch": 1.16,
"learning_rate": 3.379718428450832e-05,
"loss": 0.0071,
"step": 8600
},
{
"epoch": 1.17,
"learning_rate": 3.346659194740827e-05,
"loss": 0.0043,
"step": 8700
},
{
"epoch": 1.18,
"learning_rate": 3.313431868940551e-05,
"loss": 0.0062,
"step": 8800
},
{
"epoch": 1.2,
"learning_rate": 3.280043047860958e-05,
"loss": 0.0096,
"step": 8900
},
{
"epoch": 1.21,
"learning_rate": 3.24649936037558e-05,
"loss": 0.0049,
"step": 9000
}
],
"logging_steps": 100,
"max_steps": 22296,
"num_train_epochs": 3,
"save_steps": 3000,
"total_flos": 3.6356765746121933e+18,
"trial_name": null,
"trial_params": null
}