Training in progress, epoch 19, checkpoint
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:0ee5576c5f1026abaeac1844024bd4d9b26c80d3552b0a02a96d7db04ae8d0bf
 size 778096664
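Each of the files in this commit is a Git LFS pointer: the repository tracks only a version line, the object's sha256 id, and its byte size, while the tensor data itself is stored out of band. A minimal sketch of reading those three fields back, assuming the path on disk still holds the raw pointer text rather than the downloaded binary:

# Sketch: parse a Git LFS pointer file into its fields.
# Assumes the path still contains the pointer text (version / oid / size),
# not the resolved binary payload.
def parse_lfs_pointer(path):
    fields = {}
    with open(path, "r", encoding="utf-8") as fh:
        for line in fh:
            key, _, value = line.strip().partition(" ")
            if key:
                fields[key] = value
    return fields

pointer = parse_lfs_pointer("last-checkpoint/adapter_model.safetensors")
print(pointer["oid"], pointer["size"])  # e.g. sha256:0ee5576c... 778096664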
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:dad3791c8f2b4864fb5026c9f40cea0748aba372d869400d34dc8ece1a4c56f4
 size 395561780
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:c013855f1f203be97dafecaefb623110a70641991f8c85f3b86fa91a81df5017
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:28df73b3d2574d5e7d5989c9c1976ff625cb4c34411f21da816c1ceb48b6e74b
 size 1064
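The oid recorded in each pointer is the SHA-256 of the real object, so a pulled checkpoint file can be verified against this commit. A hedged sketch, using the scheduler.pt hash shown above; the local path is an assumption about where the checkpoint was downloaded:

import hashlib

# Sketch: recompute the SHA-256 of a downloaded checkpoint file and
# compare it with the oid from its LFS pointer in this commit.
def sha256_of(path, chunk_size=1 << 20):
    digest = hashlib.sha256()
    with open(path, "rb") as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

expected = "28df73b3d2574d5e7d5989c9c1976ff625cb4c34411f21da816c1ceb48b6e74b"
actual = sha256_of("last-checkpoint/scheduler.pt")
print("ok" if actual == expected else f"mismatch: {actual}")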
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.002446170896291733,
   "best_model_checkpoint": "/Workspace/Users/[email protected]/Projects/FRED/models/unsloth/Llama-3.2-3B-Instruct/checkpoint-5250",
-  "epoch":
+  "epoch": 19.0,
   "eval_steps": 500,
-  "global_step":
+  "global_step": 6650,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -592,6 +592,35 @@
       "eval_samples_per_second": 10.643,
       "eval_steps_per_second": 2.661,
       "step": 6300
+    },
+    {
+      "epoch": 18.285714285714285,
+      "grad_norm": 0.000776465458329767,
+      "learning_rate": 4.316546762589929e-06,
+      "loss": -0.0001,
+      "step": 6400
+    },
+    {
+      "epoch": 18.571428571428573,
+      "grad_norm": 0.0008999446872621775,
+      "learning_rate": 3.5971223021582732e-06,
+      "loss": -0.0001,
+      "step": 6500
+    },
+    {
+      "epoch": 18.857142857142858,
+      "grad_norm": 0.0010123606771230698,
+      "learning_rate": 2.877697841726619e-06,
+      "loss": -0.0001,
+      "step": 6600
+    },
+    {
+      "epoch": 19.0,
+      "eval_loss": 0.002609863178804517,
+      "eval_runtime": 65.3789,
+      "eval_samples_per_second": 10.707,
+      "eval_steps_per_second": 2.677,
+      "step": 6650
     }
   ],
   "logging_steps": 100,
@@ -611,7 +640,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.
+  "total_flos": 1.879853696212992e+17,
   "train_batch_size": 4,
   "trial_name": null,
   "trial_params": null
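The trainer_state.json update appends four log_history entries (training logs at steps 6400, 6500, and 6600, plus the epoch-19 evaluation at step 6650) and advances epoch/global_step to 19.0/6650, which implies 6650 / 19 = 350 optimizer steps per epoch and matches the fractional epochs logged (e.g. 6400 / 350 ≈ 18.286). Note that the new eval_loss (0.002610) sits slightly above best_metric (0.002446), so best_model_checkpoint still points at checkpoint-5250. A short sketch of reading these fields back, assuming the last-checkpoint directory layout shown in this commit:

import json

# Sketch: inspect the updated trainer state for this checkpoint.
with open("last-checkpoint/trainer_state.json", "r", encoding="utf-8") as fh:
    state = json.load(fh)

steps_per_epoch = state["global_step"] / state["epoch"]      # 6650 / 19.0 = 350.0
eval_entries = [e for e in state["log_history"] if "eval_loss" in e]

print("best_metric:", state["best_metric"])                  # 0.002446...
print("steps per epoch:", steps_per_epoch)
print("latest eval_loss:", eval_entries[-1]["eval_loss"])    # 0.002609... at step 6650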