Rodo-Sami committed on
Commit dcf924f · verified · 1 Parent(s): 1a41b27

Training in progress, step 100, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e266e75392258f32eb547255edaeea06d0619e143e7dc50938603bc34d07fdee
+ oid sha256:218e913a2a7db4df827e42b0f4a8c1863bed5517695546f706eab8453c5a6833
  size 239536272
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:691f3aa812f3e4280800e271f9d6ad3695a14b934cb3a354f38f514af1ee6691
+ oid sha256:589379e87ee87ca462d69e9af26f4861fbfa09365326531b5b50df0f51ebc336
  size 479362682
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:00f436e5fb00a5d5f25c590834a43f11d8673bcc1e6228808f3d06e7342e1f7f
+ oid sha256:dac8e4f5fd2c1ebfd789e72bc9acd493f90e6a8749bf1704d9dfae1e9b9474b3
  size 14960
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:042a96cf5e4774965074a5cad8519f695e7e99713a229326aef912e9bc430af0
+ oid sha256:bf4b3ed4d6beee57baa6b2e89879da453d373dabc0237ff451065abe34b539a5
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c8ceccd5d6c4ebec1c2f89d430c5e97c2150995c754765a9c28b13a9d7b063c9
+ oid sha256:9495fb036dbbb89294b35739f1a6d332b40a48a1c9c7b03eb1c7fba55cdff575
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d5a81c7e30751a346bb5694351b1ab11e801f1365384f4b4197186403f8a5d4c
+ oid sha256:0b9bd03e22f54cdccd438fed24ab7cd3883a3335343b3d923270ecceb7008553
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a8d5f446d81df8b8e6d5d3423a874831fec3d08bffbe4980db71b54ceb3e7bd4
+ oid sha256:49d60a69e2379be2053e816cbaff31e6c931b5922dd86c71c9eaf473299cbf62
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 1.4859658479690552,
- "best_model_checkpoint": "miner_id_24/checkpoint-75",
- "epoch": 0.04156132026460707,
+ "best_metric": 1.4827420711517334,
+ "best_model_checkpoint": "miner_id_24/checkpoint-100",
+ "epoch": 0.055415093686142766,
  "eval_steps": 25,
- "global_step": 75,
+ "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -564,6 +564,189 @@
  "eval_samples_per_second": 56.962,
  "eval_steps_per_second": 3.561,
  "step": 75
+ },
+ {
+ "epoch": 0.0421154712014685,
+ "grad_norm": 0.3435385823249817,
+ "learning_rate": 1.4938160786375572e-05,
+ "loss": 1.3958,
+ "step": 76
+ },
+ {
+ "epoch": 0.04266962213832993,
+ "grad_norm": 0.42353853583335876,
+ "learning_rate": 1.3778739760445552e-05,
+ "loss": 1.57,
+ "step": 77
+ },
+ {
+ "epoch": 0.04322377307519135,
+ "grad_norm": 0.39138633012771606,
+ "learning_rate": 1.2658926150792322e-05,
+ "loss": 1.5377,
+ "step": 78
+ },
+ {
+ "epoch": 0.043777924012052785,
+ "grad_norm": 0.4724353849887848,
+ "learning_rate": 1.157994445715706e-05,
+ "loss": 1.5676,
+ "step": 79
+ },
+ {
+ "epoch": 0.04433207494891421,
+ "grad_norm": 0.44573283195495605,
+ "learning_rate": 1.0542974530180327e-05,
+ "loss": 1.5036,
+ "step": 80
+ },
+ {
+ "epoch": 0.044886225885775635,
+ "grad_norm": 0.44128575921058655,
+ "learning_rate": 9.549150281252633e-06,
+ "loss": 1.5197,
+ "step": 81
+ },
+ {
+ "epoch": 0.04544037682263707,
+ "grad_norm": 0.4542405903339386,
+ "learning_rate": 8.599558442598998e-06,
+ "loss": 1.4969,
+ "step": 82
+ },
+ {
+ "epoch": 0.04599452775949849,
+ "grad_norm": 0.44254857301712036,
+ "learning_rate": 7.695237378953223e-06,
+ "loss": 1.4735,
+ "step": 83
+ },
+ {
+ "epoch": 0.046548678696359924,
+ "grad_norm": 0.47012338042259216,
+ "learning_rate": 6.837175952121306e-06,
+ "loss": 1.525,
+ "step": 84
+ },
+ {
+ "epoch": 0.04710282963322135,
+ "grad_norm": 0.5244672894477844,
+ "learning_rate": 6.026312439675552e-06,
+ "loss": 1.4543,
+ "step": 85
+ },
+ {
+ "epoch": 0.047656980570082774,
+ "grad_norm": 0.5304999351501465,
+ "learning_rate": 5.263533508961827e-06,
+ "loss": 1.4387,
+ "step": 86
+ },
+ {
+ "epoch": 0.048211131506944206,
+ "grad_norm": 0.6247049570083618,
+ "learning_rate": 4.549673247541875e-06,
+ "loss": 1.4606,
+ "step": 87
+ },
+ {
+ "epoch": 0.04876528244380563,
+ "grad_norm": 0.40863263607025146,
+ "learning_rate": 3.885512251130763e-06,
+ "loss": 1.5058,
+ "step": 88
+ },
+ {
+ "epoch": 0.049319433380667056,
+ "grad_norm": 0.3656030297279358,
+ "learning_rate": 3.271776770026963e-06,
+ "loss": 1.4232,
+ "step": 89
+ },
+ {
+ "epoch": 0.04987358431752849,
+ "grad_norm": 0.39725741744041443,
+ "learning_rate": 2.7091379149682685e-06,
+ "loss": 1.4841,
+ "step": 90
+ },
+ {
+ "epoch": 0.05042773525438991,
+ "grad_norm": 0.4447474777698517,
+ "learning_rate": 2.1982109232821178e-06,
+ "loss": 1.5307,
+ "step": 91
+ },
+ {
+ "epoch": 0.050981886191251345,
+ "grad_norm": 0.4279220998287201,
+ "learning_rate": 1.7395544861325718e-06,
+ "loss": 1.5223,
+ "step": 92
+ },
+ {
+ "epoch": 0.05153603712811277,
+ "grad_norm": 0.45289361476898193,
+ "learning_rate": 1.333670137599713e-06,
+ "loss": 1.5057,
+ "step": 93
+ },
+ {
+ "epoch": 0.052090188064974195,
+ "grad_norm": 0.4581681787967682,
+ "learning_rate": 9.810017062595322e-07,
+ "loss": 1.4999,
+ "step": 94
+ },
+ {
+ "epoch": 0.05264433900183563,
+ "grad_norm": 0.47461172938346863,
+ "learning_rate": 6.819348298638839e-07,
+ "loss": 1.481,
+ "step": 95
+ },
+ {
+ "epoch": 0.05319848993869705,
+ "grad_norm": 0.4691740870475769,
+ "learning_rate": 4.367965336512403e-07,
+ "loss": 1.453,
+ "step": 96
+ },
+ {
+ "epoch": 0.05375264087555848,
+ "grad_norm": 0.4813995063304901,
+ "learning_rate": 2.458548727494292e-07,
+ "loss": 1.4591,
+ "step": 97
+ },
+ {
+ "epoch": 0.05430679181241991,
+ "grad_norm": 0.598747730255127,
+ "learning_rate": 1.0931863906127327e-07,
+ "loss": 1.4327,
+ "step": 98
+ },
+ {
+ "epoch": 0.054860942749281334,
+ "grad_norm": 0.6092488765716553,
+ "learning_rate": 2.7337132953697554e-08,
+ "loss": 1.4965,
+ "step": 99
+ },
+ {
+ "epoch": 0.055415093686142766,
+ "grad_norm": 0.6786034107208252,
+ "learning_rate": 0.0,
+ "loss": 1.5936,
+ "step": 100
+ },
+ {
+ "epoch": 0.055415093686142766,
+ "eval_loss": 1.4827420711517334,
+ "eval_runtime": 418.5722,
+ "eval_samples_per_second": 58.088,
+ "eval_steps_per_second": 3.631,
+ "step": 100
  }
  ],
  "logging_steps": 1,
@@ -587,12 +770,12 @@
  "should_evaluate": false,
  "should_log": false,
  "should_save": true,
- "should_training_stop": false
+ "should_training_stop": true
  },
  "attributes": {}
  }
  },
- "total_flos": 6.687756927369216e+17,
+ "total_flos": 8.917009236492288e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null