error577 committed
Commit e153bf4 · verified · 1 Parent(s): 5e97bb1

Training in progress, step 100, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:91bd6e535a655553fcc688dfd66c2f6d767f0d294b1d5ac0fca9094949e9ac27
+ oid sha256:bf0982be125e82f8e3e025b7d907e472dd770e1e3f5ad83ef4473e5365a253d6
  size 90207248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1ab50054578b1f98b38a80e31fe5d3ca80cec2025126bcda9fac6150738c26ea
+ oid sha256:04d1f012787ee2817941955f70abf4e3718aecc27b4f67763b972f97cb4a43b1
  size 46057082
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6d91819a4896d624752e6b6b60b7ae61348a5eacfbc001dd4c36a43c13bfb13d
+ oid sha256:bb9a3ea44ec53a22070bc6551b352712629b97dc15f980273371583386c8e0ce
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:beb167787cdd9594eed637547cf4a56f4a4d8ea359757a120c76bb5d91190782
+ oid sha256:3a60c7d771c1fd156acee762fba03c724cb41829a3f71df370ecd1d20b134982
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.040029354860230834,
+ "epoch": 0.05337247314697445,
  "eval_steps": 7,
- "global_step": 75,
+ "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -620,6 +620,213 @@
  "learning_rate": 3.5721239031346066e-05,
  "loss": 1.1504,
  "step": 75
+ },
+ {
+ "epoch": 0.04056307959170058,
+ "grad_norm": 0.29981729388237,
+ "learning_rate": 3.308693936411421e-05,
+ "loss": 0.9346,
+ "step": 76
+ },
+ {
+ "epoch": 0.041096804323170325,
+ "grad_norm": 0.32428425550460815,
+ "learning_rate": 3.053416295410026e-05,
+ "loss": 1.1467,
+ "step": 77
+ },
+ {
+ "epoch": 0.041096804323170325,
+ "eval_loss": 1.2112306356430054,
+ "eval_runtime": 132.0592,
+ "eval_samples_per_second": 11.949,
+ "eval_steps_per_second": 5.975,
+ "step": 77
+ },
+ {
+ "epoch": 0.04163052905464007,
+ "grad_norm": 0.31357115507125854,
+ "learning_rate": 2.8066019966134904e-05,
+ "loss": 1.1449,
+ "step": 78
+ },
+ {
+ "epoch": 0.042164253786109815,
+ "grad_norm": 0.28956103324890137,
+ "learning_rate": 2.5685517452260567e-05,
+ "loss": 0.9929,
+ "step": 79
+ },
+ {
+ "epoch": 0.04269797851757956,
+ "grad_norm": 0.3215799927711487,
+ "learning_rate": 2.339555568810221e-05,
+ "loss": 0.95,
+ "step": 80
+ },
+ {
+ "epoch": 0.043231703249049305,
+ "grad_norm": 0.32306861877441406,
+ "learning_rate": 2.119892463932781e-05,
+ "loss": 1.3405,
+ "step": 81
+ },
+ {
+ "epoch": 0.04376542798051905,
+ "grad_norm": 0.34628739953041077,
+ "learning_rate": 1.9098300562505266e-05,
+ "loss": 1.3823,
+ "step": 82
+ },
+ {
+ "epoch": 0.04429915271198879,
+ "grad_norm": 0.41880619525909424,
+ "learning_rate": 1.7096242744495837e-05,
+ "loss": 1.2145,
+ "step": 83
+ },
+ {
+ "epoch": 0.044832877443458534,
+ "grad_norm": 0.34489524364471436,
+ "learning_rate": 1.5195190384357404e-05,
+ "loss": 1.2043,
+ "step": 84
+ },
+ {
+ "epoch": 0.044832877443458534,
+ "eval_loss": 1.209867000579834,
+ "eval_runtime": 131.1054,
+ "eval_samples_per_second": 12.036,
+ "eval_steps_per_second": 6.018,
+ "step": 84
+ },
+ {
+ "epoch": 0.04536660217492828,
+ "grad_norm": 0.3454246520996094,
+ "learning_rate": 1.339745962155613e-05,
+ "loss": 1.4326,
+ "step": 85
+ },
+ {
+ "epoch": 0.045900326906398024,
+ "grad_norm": 0.34903326630592346,
+ "learning_rate": 1.1705240714107302e-05,
+ "loss": 1.2395,
+ "step": 86
+ },
+ {
+ "epoch": 0.04643405163786777,
+ "grad_norm": 0.36401617527008057,
+ "learning_rate": 1.0120595370083318e-05,
+ "loss": 1.1309,
+ "step": 87
+ },
+ {
+ "epoch": 0.046967776369337515,
+ "grad_norm": 0.3476174473762512,
+ "learning_rate": 8.645454235739903e-06,
+ "loss": 1.1396,
+ "step": 88
+ },
+ {
+ "epoch": 0.04750150110080726,
+ "grad_norm": 0.3433617353439331,
+ "learning_rate": 7.281614543321269e-06,
+ "loss": 1.3106,
+ "step": 89
+ },
+ {
+ "epoch": 0.048035225832277005,
+ "grad_norm": 0.3952890932559967,
+ "learning_rate": 6.030737921409169e-06,
+ "loss": 1.3647,
+ "step": 90
+ },
+ {
+ "epoch": 0.04856895056374675,
+ "grad_norm": 0.3324783444404602,
+ "learning_rate": 4.8943483704846475e-06,
+ "loss": 1.3629,
+ "step": 91
+ },
+ {
+ "epoch": 0.04856895056374675,
+ "eval_loss": 1.209067463874817,
+ "eval_runtime": 131.1887,
+ "eval_samples_per_second": 12.028,
+ "eval_steps_per_second": 6.014,
+ "step": 91
+ },
+ {
+ "epoch": 0.049102675295216495,
+ "grad_norm": 0.3305179476737976,
+ "learning_rate": 3.873830406168111e-06,
+ "loss": 0.9684,
+ "step": 92
+ },
+ {
+ "epoch": 0.049636400026686234,
+ "grad_norm": 0.3130112290382385,
+ "learning_rate": 2.970427372400353e-06,
+ "loss": 1.4122,
+ "step": 93
+ },
+ {
+ "epoch": 0.05017012475815598,
+ "grad_norm": 0.4082207977771759,
+ "learning_rate": 2.1852399266194314e-06,
+ "loss": 1.0472,
+ "step": 94
+ },
+ {
+ "epoch": 0.050703849489625724,
+ "grad_norm": 0.37707728147506714,
+ "learning_rate": 1.5192246987791981e-06,
+ "loss": 1.3998,
+ "step": 95
+ },
+ {
+ "epoch": 0.05123757422109547,
+ "grad_norm": 0.3092212677001953,
+ "learning_rate": 9.731931258429638e-07,
+ "loss": 1.3452,
+ "step": 96
+ },
+ {
+ "epoch": 0.051771298952565215,
+ "grad_norm": 0.31625714898109436,
+ "learning_rate": 5.478104631726711e-07,
+ "loss": 1.3001,
+ "step": 97
+ },
+ {
+ "epoch": 0.05230502368403496,
+ "grad_norm": 0.39429420232772827,
+ "learning_rate": 2.4359497401758024e-07,
+ "loss": 1.2862,
+ "step": 98
+ },
+ {
+ "epoch": 0.05230502368403496,
+ "eval_loss": 1.2093968391418457,
+ "eval_runtime": 131.0498,
+ "eval_samples_per_second": 12.041,
+ "eval_steps_per_second": 6.021,
+ "step": 98
+ },
+ {
+ "epoch": 0.052838748415504705,
+ "grad_norm": 0.3808484375476837,
+ "learning_rate": 6.09172980904238e-08,
+ "loss": 1.3516,
+ "step": 99
+ },
+ {
+ "epoch": 0.05337247314697445,
+ "grad_norm": 0.33221495151519775,
+ "learning_rate": 0.0,
+ "loss": 0.9949,
+ "step": 100
  }
  ],
  "logging_steps": 1,
@@ -634,12 +841,12 @@
  "should_evaluate": false,
  "should_log": false,
  "should_save": true,
- "should_training_stop": false
+ "should_training_stop": true
  },
  "attributes": {}
  }
  },
- "total_flos": 6047170899738624.0,
+ "total_flos": 8056777010577408.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null