{ "best_metric": 0.17973944544792175, "best_model_checkpoint": "./mistral/01-03-24-Weni-ZeroShot-3.3.18-Mistral-7b-Multilanguage-3.2.0_Zeroshot-2_max_steps-800_batch_256_2024-03-01_ppid_6/checkpoint-400", "epoch": 3.9603960396039604, "eval_steps": 100, "global_step": 400, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.2, "grad_norm": 2.1325972080230713, "learning_rate": 4.5e-05, "loss": 1.424, "step": 20 }, { "epoch": 0.4, "grad_norm": 0.3059396743774414, "learning_rate": 9.5e-05, "loss": 0.5357, "step": 40 }, { "epoch": 0.59, "grad_norm": 0.20484383404254913, "learning_rate": 0.000145, "loss": 0.4403, "step": 60 }, { "epoch": 0.79, "grad_norm": 0.3068823516368866, "learning_rate": 0.000195, "loss": 0.4135, "step": 80 }, { "epoch": 0.99, "grad_norm": 0.1791614145040512, "learning_rate": 0.0001996917333733128, "loss": 0.3859, "step": 100 }, { "epoch": 0.99, "eval_loss": 0.3723442554473877, "eval_runtime": 93.8197, "eval_samples_per_second": 30.559, "eval_steps_per_second": 0.959, "step": 100 }, { "epoch": 1.19, "grad_norm": 0.18006961047649384, "learning_rate": 0.00019862856015372317, "loss": 0.3567, "step": 120 }, { "epoch": 1.39, "grad_norm": 0.18006713688373566, "learning_rate": 0.0001968147640378108, "loss": 0.3436, "step": 140 }, { "epoch": 1.58, "grad_norm": 0.20077605545520782, "learning_rate": 0.00019426414910921787, "loss": 0.3322, "step": 160 }, { "epoch": 1.78, "grad_norm": 0.2087867707014084, "learning_rate": 0.00019099612708765434, "loss": 0.3143, "step": 180 }, { "epoch": 1.98, "grad_norm": 0.21649543941020966, "learning_rate": 0.00018703556959398998, "loss": 0.3011, "step": 200 }, { "epoch": 1.98, "eval_loss": 0.2929498553276062, "eval_runtime": 93.8381, "eval_samples_per_second": 30.553, "eval_steps_per_second": 0.959, "step": 200 }, { "epoch": 2.18, "grad_norm": 0.2589591145515442, "learning_rate": 0.00018241261886220154, "loss": 0.275, "step": 220 }, { "epoch": 2.38, "grad_norm": 0.22108183801174164, "learning_rate": 0.00017716245833877201, "loss": 0.2632, "step": 240 }, { "epoch": 2.57, "grad_norm": 0.233080193400383, "learning_rate": 0.00017132504491541818, "loss": 0.2515, "step": 260 }, { "epoch": 2.77, "grad_norm": 0.23667199909687042, "learning_rate": 0.00016494480483301836, "loss": 0.2407, "step": 280 }, { "epoch": 2.97, "grad_norm": 0.27916812896728516, "learning_rate": 0.00015807029557109398, "loss": 0.2293, "step": 300 }, { "epoch": 2.97, "eval_loss": 0.2265055775642395, "eval_runtime": 93.8776, "eval_samples_per_second": 30.54, "eval_steps_per_second": 0.959, "step": 300 }, { "epoch": 3.17, "grad_norm": 0.25347772240638733, "learning_rate": 0.00015075383629607042, "loss": 0.2072, "step": 320 }, { "epoch": 3.37, "grad_norm": 0.2635527551174164, "learning_rate": 0.00014305110968082952, "loss": 0.1977, "step": 340 }, { "epoch": 3.56, "grad_norm": 0.2663807272911072, "learning_rate": 0.00013502073812594675, "loss": 0.188, "step": 360 }, { "epoch": 3.76, "grad_norm": 0.2633569836616516, "learning_rate": 0.00012672383760782568, "loss": 0.1842, "step": 380 }, { "epoch": 3.96, "grad_norm": 0.2857445478439331, "learning_rate": 0.00011822355254921478, "loss": 0.1728, "step": 400 }, { "epoch": 3.96, "eval_loss": 0.17973944544792175, "eval_runtime": 93.8682, "eval_samples_per_second": 30.543, "eval_steps_per_second": 0.959, "step": 400 } ], "logging_steps": 20, "max_steps": 800, "num_input_tokens_seen": 0, "num_train_epochs": 8, "save_steps": 100, "total_flos": 3.644899239558906e+18, 
"train_batch_size": 16, "trial_name": null, "trial_params": null }