{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.7976071784646062,
  "eval_steps": 500,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03988035892323031,
      "grad_norm": 16.628543853759766,
      "learning_rate": 0.00012,
      "loss": 2.1274,
      "step": 10
    },
    {
      "epoch": 0.07976071784646062,
      "grad_norm": 12.371907234191895,
      "learning_rate": 0.00019950829025450114,
      "loss": 0.7722,
      "step": 20
    },
    {
      "epoch": 0.11964107676969092,
      "grad_norm": 9.106270790100098,
      "learning_rate": 0.00019652089102773488,
      "loss": 0.4456,
      "step": 30
    },
    {
      "epoch": 0.15952143569292124,
      "grad_norm": 5.712669372558594,
      "learning_rate": 0.00019090065350491626,
      "loss": 0.3662,
      "step": 40
    },
    {
      "epoch": 0.19940179461615154,
      "grad_norm": 2.922630548477173,
      "learning_rate": 0.00018280088311480201,
      "loss": 0.2983,
      "step": 50
    },
    {
      "epoch": 0.23928215353938184,
      "grad_norm": 5.451596260070801,
      "learning_rate": 0.00017244252047910892,
      "loss": 0.3034,
      "step": 60
    },
    {
      "epoch": 0.27916251246261214,
      "grad_norm": 3.604940414428711,
      "learning_rate": 0.00016010811472830252,
      "loss": 0.2754,
      "step": 70
    },
    {
      "epoch": 0.3190428713858425,
      "grad_norm": 2.805121660232544,
      "learning_rate": 0.0001461341162978688,
      "loss": 0.2659,
      "step": 80
    },
    {
      "epoch": 0.3589232303090728,
      "grad_norm": 3.4018733501434326,
      "learning_rate": 0.00013090169943749476,
      "loss": 0.2767,
      "step": 90
    },
    {
      "epoch": 0.3988035892323031,
      "grad_norm": 3.0557034015655518,
      "learning_rate": 0.0001148263647711842,
      "loss": 0.286,
      "step": 100
    },
    {
      "epoch": 0.4386839481555334,
      "grad_norm": 4.8394036293029785,
      "learning_rate": 9.834660552336415e-05,
      "loss": 0.2604,
      "step": 110
    },
    {
      "epoch": 0.4785643070787637,
      "grad_norm": 6.2420759201049805,
      "learning_rate": 8.191194656678904e-05,
      "loss": 0.2447,
      "step": 120
    },
    {
      "epoch": 0.518444666001994,
      "grad_norm": 3.427865505218506,
      "learning_rate": 6.59706825558357e-05,
      "loss": 0.282,
      "step": 130
    },
    {
      "epoch": 0.5583250249252243,
      "grad_norm": 5.478673934936523,
      "learning_rate": 5.095764961694922e-05,
      "loss": 0.2314,
      "step": 140
    },
    {
      "epoch": 0.5982053838484547,
      "grad_norm": 1.9072978496551514,
      "learning_rate": 3.7282364152646297e-05,
      "loss": 0.2284,
      "step": 150
    },
    {
      "epoch": 0.638085742771685,
      "grad_norm": 2.6893632411956787,
      "learning_rate": 2.5317852301584643e-05,
      "loss": 0.2232,
      "step": 160
    },
    {
      "epoch": 0.6779661016949152,
      "grad_norm": 2.540834665298462,
      "learning_rate": 1.5390474757906446e-05,
      "loss": 0.2421,
      "step": 170
    },
    {
      "epoch": 0.7178464606181456,
      "grad_norm": 2.5203776359558105,
      "learning_rate": 7.771024502261526e-06,
      "loss": 0.2265,
      "step": 180
    },
    {
      "epoch": 0.7577268195413759,
      "grad_norm": 2.1481897830963135,
      "learning_rate": 2.667340275199426e-06,
      "loss": 0.2287,
      "step": 190
    },
    {
      "epoch": 0.7976071784646062,
      "grad_norm": 2.8947484493255615,
      "learning_rate": 2.1863727812254653e-07,
      "loss": 0.2406,
      "step": 200
    }
  ],
  "logging_steps": 10,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2868052066295808.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}