0x1202 committed
Commit 1becd67 · verified · 1 Parent(s): e3df82f

Training in progress, step 130, checkpoint

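For context, the files touched by this commit are exactly what transformers' Trainer writes on each periodic save. Below is a minimal sketch of such a run; it is an assumption for illustration, not taken from this repository. `model`, `train_ds` and `eval_ds` are placeholders, and only the commented values (batch size 1, eval_steps 25, logging_steps 1, stopping at step 130, output_dir miner_id_24) come from the diff itself.

```python
# Minimal sketch (assumed setup, not from this repo) of a Trainer run that writes
# checkpoints like last-checkpoint/: adapter weights, optimizer.pt, rng_state.pth,
# scheduler.pt and trainer_state.json.
from transformers import Trainer, TrainingArguments

args = TrainingArguments(
    output_dir="miner_id_24",        # trainer_state.json points at miner_id_24/checkpoint-100
    per_device_train_batch_size=1,   # "train_batch_size": 1
    max_steps=130,                   # assumed: the LR schedule reaches 0.0 at step 130
    logging_steps=1,                 # "logging_steps": 1
    eval_strategy="steps",           # older transformers releases spell this evaluation_strategy
    eval_steps=25,                   # "eval_steps": 25
    save_strategy="steps",
    save_steps=25,                   # assumed save cadence
    load_best_model_at_end=True,     # tracks "best_model_checkpoint"
)

trainer = Trainer(
    model=model,                     # placeholder: the adapter-wrapped model being tuned
    args=args,
    train_dataset=train_ds,          # placeholder
    eval_dataset=eval_ds,            # placeholder
)
trainer.train()
```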
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9692744b48db18fa4e4208eca6c93f9c09bf94a432e0e61f592cb15c2ce7b8ed
+oid sha256:449464db766d63860123fe585eb2ea917a236cabadecd7c6c7a38b33a67a1128
 size 167832240
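These entries are Git LFS pointer files: only the sha256 oid changes, while the payload size stays the same. A small sketch (an assumed helper, not part of this repo) for checking that a downloaded file matches the new oid recorded above:

```python
# Verify a downloaded LFS object against the "oid sha256:..." line in its pointer file.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "449464db766d63860123fe585eb2ea917a236cabadecd7c6c7a38b33a67a1128"  # new oid from the diff
actual = sha256_of("last-checkpoint/adapter_model.safetensors")
assert actual == expected, f"checksum mismatch: {actual}"
```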
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3e5bb09298c4244d44cfbf7a5d6da5b4b2009c455fa622da155c3110db955846
+oid sha256:5b6a2a7a6ef3cc7ec0da7330f4cbbf8c921fc40740eb8aa8fc148cef03435478
 size 335922386
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4f4169c04cb7870f2b433cea736eca37c4197f487fcf83cf802a73d112dbe27f
+oid sha256:c9222f49813f2d37090b6c3ba6ddf1267525395ace62444cf594c70d8427f4f5
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:dd52d52fe0f0348e0615f49790d582d99bc965e6e8d769157b68f404ab1ba391
+oid sha256:367bcdbb471387c2c2f46f68f85951017299d77f7594d07a196d5f5b5d01911a
 size 1064
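optimizer.pt, rng_state.pth and scheduler.pt are what lets a run pick up exactly where it stopped, with the same optimizer moments, learning-rate schedule position and RNG streams. A hedged sketch of resuming from this directory, assuming a `trainer` built with the same arguments as in the sketch further above:

```python
# Resume training from the saved checkpoint directory (sketch; `trainer` assumed defined).
trainer.train(resume_from_checkpoint="last-checkpoint")
```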
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.20296970009803772,
   "best_model_checkpoint": "miner_id_24/checkpoint-100",
-  "epoch": 1.5466408893185113,
+  "epoch": 2.0106331561140647,
   "eval_steps": 25,
-  "global_step": 100,
+  "global_step": 130,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -747,6 +747,224 @@
       "eval_samples_per_second": 6.839,
       "eval_steps_per_second": 6.839,
       "step": 100
+    },
+    {
+      "epoch": 1.5621072982116964,
+      "grad_norm": 0.1923961192369461,
+      "learning_rate": 4.1193844348156886e-05,
+      "loss": 0.1462,
+      "step": 101
+    },
+    {
+      "epoch": 1.5775737071048814,
+      "grad_norm": 0.18478325009346008,
+      "learning_rate": 3.852827617839084e-05,
+      "loss": 0.1648,
+      "step": 102
+    },
+    {
+      "epoch": 1.5930401159980667,
+      "grad_norm": 0.30043265223503113,
+      "learning_rate": 3.593910515999536e-05,
+      "loss": 0.1844,
+      "step": 103
+    },
+    {
+      "epoch": 1.6085065248912518,
+      "grad_norm": 0.5838200449943542,
+      "learning_rate": 3.342810578145436e-05,
+      "loss": 0.2355,
+      "step": 104
+    },
+    {
+      "epoch": 1.623972933784437,
+      "grad_norm": 0.5884765386581421,
+      "learning_rate": 3.099699895631474e-05,
+      "loss": 0.2287,
+      "step": 105
+    },
+    {
+      "epoch": 1.6394393426776221,
+      "grad_norm": 0.4631075859069824,
+      "learning_rate": 2.8647450843757897e-05,
+      "loss": 0.2004,
+      "step": 106
+    },
+    {
+      "epoch": 1.6549057515708072,
+      "grad_norm": 0.3879186809062958,
+      "learning_rate": 2.6381071706697644e-05,
+      "loss": 0.1996,
+      "step": 107
+    },
+    {
+      "epoch": 1.6703721604639923,
+      "grad_norm": 0.2909069359302521,
+      "learning_rate": 2.4199414808186406e-05,
+      "loss": 0.1985,
+      "step": 108
+    },
+    {
+      "epoch": 1.6858385693571774,
+      "grad_norm": 0.34202706813812256,
+      "learning_rate": 2.210397534688617e-05,
+      "loss": 0.1848,
+      "step": 109
+    },
+    {
+      "epoch": 1.7013049782503624,
+      "grad_norm": 0.3348611295223236,
+      "learning_rate": 2.009618943233419e-05,
+      "loss": 0.2004,
+      "step": 110
+    },
+    {
+      "epoch": 1.7167713871435475,
+      "grad_norm": 0.4890515208244324,
+      "learning_rate": 1.8177433100705207e-05,
+      "loss": 0.2107,
+      "step": 111
+    },
+    {
+      "epoch": 1.7322377960367326,
+      "grad_norm": 0.560546875,
+      "learning_rate": 1.634902137174483e-05,
+      "loss": 0.2933,
+      "step": 112
+    },
+    {
+      "epoch": 1.7477042049299178,
+      "grad_norm": 0.5725692510604858,
+      "learning_rate": 1.4612207347520938e-05,
+      "loss": 0.2927,
+      "step": 113
+    },
+    {
+      "epoch": 1.763170613823103,
+      "grad_norm": 0.33504927158355713,
+      "learning_rate": 1.2968181353609852e-05,
+      "loss": 0.1681,
+      "step": 114
+    },
+    {
+      "epoch": 1.7786370227162882,
+      "grad_norm": 0.34945377707481384,
+      "learning_rate": 1.1418070123306989e-05,
+      "loss": 0.1594,
+      "step": 115
+    },
+    {
+      "epoch": 1.7941034316094733,
+      "grad_norm": 0.31711897253990173,
+      "learning_rate": 9.962936025419754e-06,
+      "loss": 0.1475,
+      "step": 116
+    },
+    {
+      "epoch": 1.8095698405026583,
+      "grad_norm": 0.25584596395492554,
+      "learning_rate": 8.603776336173235e-06,
+      "loss": 0.1476,
+      "step": 117
+    },
+    {
+      "epoch": 1.8250362493958434,
+      "grad_norm": 0.34091395139694214,
+      "learning_rate": 7.34152255572697e-06,
+      "loss": 0.1856,
+      "step": 118
+    },
+    {
+      "epoch": 1.8405026582890285,
+      "grad_norm": 0.3409639894962311,
+      "learning_rate": 6.1770397697710414e-06,
+      "loss": 0.1982,
+      "step": 119
+    },
+    {
+      "epoch": 1.8559690671822135,
+      "grad_norm": 0.2535560131072998,
+      "learning_rate": 5.11112605663977e-06,
+      "loss": 0.1881,
+      "step": 120
+    },
+    {
+      "epoch": 1.8714354760753986,
+      "grad_norm": 0.26184314489364624,
+      "learning_rate": 4.144511940348516e-06,
+      "loss": 0.1976,
+      "step": 121
+    },
+    {
+      "epoch": 1.886901884968584,
+      "grad_norm": 0.28429871797561646,
+      "learning_rate": 3.2778598899291465e-06,
+      "loss": 0.1938,
+      "step": 122
+    },
+    {
+      "epoch": 1.902368293861769,
+      "grad_norm": 0.23394553363323212,
+      "learning_rate": 2.511763865406824e-06,
+      "loss": 0.1655,
+      "step": 123
+    },
+    {
+      "epoch": 1.917834702754954,
+      "grad_norm": 0.36569544672966003,
+      "learning_rate": 1.8467489107293509e-06,
+      "loss": 0.2076,
+      "step": 124
+    },
+    {
+      "epoch": 1.9333011116481393,
+      "grad_norm": 0.38919389247894287,
+      "learning_rate": 1.2832707939284427e-06,
+      "loss": 0.1977,
+      "step": 125
+    },
+    {
+      "epoch": 1.9333011116481393,
+      "eval_loss": 0.18706867098808289,
+      "eval_runtime": 7.3048,
+      "eval_samples_per_second": 6.845,
+      "eval_steps_per_second": 6.845,
+      "step": 125
+    },
+    {
+      "epoch": 1.9487675205413244,
+      "grad_norm": 0.2989993989467621,
+      "learning_rate": 8.217156947590064e-07,
+      "loss": 0.2162,
+      "step": 126
+    },
+    {
+      "epoch": 1.9642339294345095,
+      "grad_norm": 0.41685956716537476,
+      "learning_rate": 4.623999400308054e-07,
+      "loss": 0.2151,
+      "step": 127
+    },
+    {
+      "epoch": 1.9797003383276945,
+      "grad_norm": 0.2608714699745178,
+      "learning_rate": 2.05569786813925e-07,
+      "loss": 0.2491,
+      "step": 128
+    },
+    {
+      "epoch": 1.9951667472208796,
+      "grad_norm": 0.4548462927341461,
+      "learning_rate": 5.1401253666411016e-08,
+      "loss": 0.333,
+      "step": 129
+    },
+    {
+      "epoch": 2.0106331561140647,
+      "grad_norm": 0.17462550103664398,
+      "learning_rate": 0.0,
+      "loss": 0.1465,
+      "step": 130
     }
   ],
   "logging_steps": 1,
@@ -770,12 +988,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 1.579466434711388e+17,
+  "total_flos": 2.052912237505413e+17,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null
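The updated trainer_state.json can be inspected directly. A small sketch (assumed tooling, not part of this repo) that lists the log entries this commit adds, showing the loss per step and the learning rate decaying to 0.0 at step 130:

```python
# Summarize the new log_history entries (steps 101-130) from the checkpoint state file.
import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print("global_step:", state["global_step"], " epoch:", round(state["epoch"], 4))
print("best_metric:", state["best_metric"], " at", state["best_model_checkpoint"])

for entry in state["log_history"]:
    step = entry.get("step", 0)
    if step > 100 and "loss" in entry:  # rows added between checkpoint-100 and checkpoint-130
        print(f"step {step:3d}  lr {entry['learning_rate']:.3e}  loss {entry['loss']:.4f}")
```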