gokulsrinivasagan committed (verified)
Commit 89a938b · 1 Parent(s): b9611ab

End of training

README.md CHANGED
@@ -1,14 +1,32 @@
 ---
 library_name: transformers
+language:
+- en
 base_model: gokulsrinivasagan/bert_tiny_lda_5_v1_book
 tags:
 - generated_from_trainer
+datasets:
+- glue
 metrics:
 - matthews_correlation
 - accuracy
 model-index:
 - name: bert_tiny_lda_5_v1_book_cola
-  results: []
+  results:
+  - task:
+      name: Text Classification
+      type: text-classification
+    dataset:
+      name: GLUE COLA
+      type: glue
+      args: cola
+    metrics:
+    - name: Matthews Correlation
+      type: matthews_correlation
+      value: 0.2911416522578436
+    - name: Accuracy
+      type: accuracy
+      value: 0.7286673188209534
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -16,11 +34,11 @@ should probably proofread and complete it, then remove this comment. -->
 
 # bert_tiny_lda_5_v1_book_cola
 
-This model is a fine-tuned version of [gokulsrinivasagan/bert_tiny_lda_5_v1_book](https://huggingface.co/gokulsrinivasagan/bert_tiny_lda_5_v1_book) on an unknown dataset.
+This model is a fine-tuned version of [gokulsrinivasagan/bert_tiny_lda_5_v1_book](https://huggingface.co/gokulsrinivasagan/bert_tiny_lda_5_v1_book) on the GLUE COLA dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.7927
-- Matthews Correlation: 0.2940
-- Accuracy: 0.7239
+- Loss: 0.5533
+- Matthews Correlation: 0.2911
+- Accuracy: 0.7287
 
 ## Model description
 
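For quick reference, below is a minimal usage sketch for this checkpoint: loading it for CoLA-style acceptability classification and scoring a few predictions with Matthews correlation. The Hub ID `gokulsrinivasagan/bert_tiny_lda_5_v1_book_cola`, the label convention (1 = acceptable, 0 = unacceptable), and the assumption that the checkpoint loads with the standard Auto classes are inferred from the model card above, not stated in this commit.

```python
# Minimal inference sketch (assumed Hub ID and label mapping; not part of this commit).
import torch
from sklearn.metrics import matthews_corrcoef
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model_id = "gokulsrinivasagan/bert_tiny_lda_5_v1_book_cola"  # assumed repo id
tokenizer = AutoTokenizer.from_pretrained(model_id)
# May need trust_remote_code=True if the LDA variant uses a custom architecture.
model = AutoModelForSequenceClassification.from_pretrained(model_id)
model.eval()

sentences = ["The book was read by the student.", "The book read the student was."]
labels = [1, 0]  # CoLA convention: 1 = acceptable, 0 = unacceptable

with torch.no_grad():
    batch = tokenizer(sentences, padding=True, truncation=True, return_tensors="pt")
    preds = model(**batch).logits.argmax(dim=-1).tolist()

print("Predictions:", preds)
print("Matthews correlation:", matthews_corrcoef(labels, preds))
```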
all_results.json ADDED
@@ -0,0 +1,16 @@
+{
+  "epoch": 8.0,
+  "eval_accuracy": 0.7286673188209534,
+  "eval_loss": 0.5532552003860474,
+  "eval_matthews_correlation": 0.2911416522578436,
+  "eval_runtime": 0.3388,
+  "eval_samples": 1043,
+  "eval_samples_per_second": 3078.762,
+  "eval_steps_per_second": 14.759,
+  "total_flos": 1793902531485696.0,
+  "train_loss": 0.374275638776667,
+  "train_runtime": 49.5834,
+  "train_samples": 8551,
+  "train_samples_per_second": 8622.849,
+  "train_steps_per_second": 34.286
+}
eval_results.json ADDED
@@ -0,0 +1,10 @@
+{
+  "epoch": 8.0,
+  "eval_accuracy": 0.7286673188209534,
+  "eval_loss": 0.5532552003860474,
+  "eval_matthews_correlation": 0.2911416522578436,
+  "eval_runtime": 0.3388,
+  "eval_samples": 1043,
+  "eval_samples_per_second": 3078.762,
+  "eval_steps_per_second": 14.759
+}
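The eval_matthews_correlation and eval_accuracy fields above are the kind of values a Trainer `compute_metrics` hook reports for CoLA. A sketch of such a hook, assuming the `evaluate` library's glue/cola metric (the actual training script is not included in this commit):

```python
# Sketch of a compute_metrics hook yielding matthews_correlation and accuracy for CoLA
# (assumed setup; this repo's training script is not part of this commit).
import numpy as np
import evaluate

cola_metric = evaluate.load("glue", "cola")  # compute() returns {"matthews_correlation": ...}

def compute_metrics(eval_pred):
    logits, labels = eval_pred
    preds = np.argmax(logits, axis=-1)
    metrics = cola_metric.compute(predictions=preds, references=labels)
    metrics["accuracy"] = float((preds == labels).mean())
    return metrics
```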
logs/events.out.tfevents.1733835489.ki-g0008.683966.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3fbe437b0c0dbe6c9e249ecf85c2ad345231066eea698e194db382b436b1b7ea
+size 475
train_results.json ADDED
@@ -0,0 +1,9 @@
+{
+  "epoch": 8.0,
+  "total_flos": 1793902531485696.0,
+  "train_loss": 0.374275638776667,
+  "train_runtime": 49.5834,
+  "train_samples": 8551,
+  "train_samples_per_second": 8622.849,
+  "train_steps_per_second": 34.286
+}
trainer_state.json ADDED
@@ -0,0 +1,187 @@
+{
+  "best_metric": 0.5532552003860474,
+  "best_model_checkpoint": "bert_tiny_lda_5_v1_book_cola/checkpoint-102",
+  "epoch": 8.0,
+  "eval_steps": 500,
+  "global_step": 272,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 1.0,
+      "grad_norm": 0.9584577679634094,
+      "learning_rate": 4.9e-05,
+      "loss": 0.608,
+      "step": 34
+    },
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 0.6912751793861389,
+      "eval_loss": 0.5934335589408875,
+      "eval_matthews_correlation": 0.0,
+      "eval_runtime": 0.3362,
+      "eval_samples_per_second": 3101.953,
+      "eval_steps_per_second": 14.87,
+      "step": 34
+    },
+    {
+      "epoch": 2.0,
+      "grad_norm": 2.1856284141540527,
+      "learning_rate": 4.8e-05,
+      "loss": 0.5582,
+      "step": 68
+    },
+    {
+      "epoch": 2.0,
+      "eval_accuracy": 0.7133269309997559,
+      "eval_loss": 0.5757725238800049,
+      "eval_matthews_correlation": 0.21056723021623536,
+      "eval_runtime": 0.3487,
+      "eval_samples_per_second": 2990.688,
+      "eval_steps_per_second": 14.337,
+      "step": 68
+    },
+    {
+      "epoch": 3.0,
+      "grad_norm": 3.2397983074188232,
+      "learning_rate": 4.7e-05,
+      "loss": 0.4656,
+      "step": 102
+    },
+    {
+      "epoch": 3.0,
+      "eval_accuracy": 0.7286673188209534,
+      "eval_loss": 0.5532552003860474,
+      "eval_matthews_correlation": 0.2911416522578436,
+      "eval_runtime": 0.3619,
+      "eval_samples_per_second": 2881.981,
+      "eval_steps_per_second": 13.816,
+      "step": 102
+    },
+    {
+      "epoch": 4.0,
+      "grad_norm": 4.243818283081055,
+      "learning_rate": 4.600000000000001e-05,
+      "loss": 0.386,
+      "step": 136
+    },
+    {
+      "epoch": 4.0,
+      "eval_accuracy": 0.7257909774780273,
+      "eval_loss": 0.6198986768722534,
+      "eval_matthews_correlation": 0.2783851428937992,
+      "eval_runtime": 0.3305,
+      "eval_samples_per_second": 3155.587,
+      "eval_steps_per_second": 15.127,
+      "step": 136
+    },
+    {
+      "epoch": 5.0,
+      "grad_norm": 5.036900043487549,
+      "learning_rate": 4.5e-05,
+      "loss": 0.3164,
+      "step": 170
+    },
+    {
+      "epoch": 5.0,
+      "eval_accuracy": 0.7190795540809631,
+      "eval_loss": 0.649246096611023,
+      "eval_matthews_correlation": 0.27384707024880584,
+      "eval_runtime": 0.3392,
+      "eval_samples_per_second": 3075.234,
+      "eval_steps_per_second": 14.742,
+      "step": 170
+    },
+    {
+      "epoch": 6.0,
+      "grad_norm": 4.896561145782471,
+      "learning_rate": 4.4000000000000006e-05,
+      "loss": 0.2656,
+      "step": 204
+    },
+    {
+      "epoch": 6.0,
+      "eval_accuracy": 0.7238734364509583,
+      "eval_loss": 0.6995246410369873,
+      "eval_matthews_correlation": 0.2903905003262487,
+      "eval_runtime": 0.3342,
+      "eval_samples_per_second": 3120.562,
+      "eval_steps_per_second": 14.96,
+      "step": 204
+    },
+    {
+      "epoch": 7.0,
+      "grad_norm": 7.966293811798096,
+      "learning_rate": 4.3e-05,
+      "loss": 0.2146,
+      "step": 238
+    },
+    {
+      "epoch": 7.0,
+      "eval_accuracy": 0.7267497777938843,
+      "eval_loss": 0.8111398816108704,
+      "eval_matthews_correlation": 0.2862670482920985,
+      "eval_runtime": 0.3314,
+      "eval_samples_per_second": 3147.321,
+      "eval_steps_per_second": 15.088,
+      "step": 238
+    },
+    {
+      "epoch": 8.0,
+      "grad_norm": 3.7810440063476562,
+      "learning_rate": 4.2e-05,
+      "loss": 0.1798,
+      "step": 272
+    },
+    {
+      "epoch": 8.0,
+      "eval_accuracy": 0.7238734364509583,
+      "eval_loss": 0.7927041053771973,
+      "eval_matthews_correlation": 0.2940303604806882,
+      "eval_runtime": 0.3232,
+      "eval_samples_per_second": 3227.352,
+      "eval_steps_per_second": 15.471,
+      "step": 272
+    },
+    {
+      "epoch": 8.0,
+      "step": 272,
+      "total_flos": 1793902531485696.0,
+      "train_loss": 0.374275638776667,
+      "train_runtime": 49.5834,
+      "train_samples_per_second": 8622.849,
+      "train_steps_per_second": 34.286
+    }
+  ],
+  "logging_steps": 1,
+  "max_steps": 1700,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 50,
+  "save_steps": 500,
+  "stateful_callbacks": {
+    "EarlyStoppingCallback": {
+      "args": {
+        "early_stopping_patience": 5,
+        "early_stopping_threshold": 0.0
+      },
+      "attributes": {
+        "early_stopping_patience_counter": 5
+      }
+    },
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": true
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 1793902531485696.0,
+  "train_batch_size": 256,
+  "trial_name": null,
+  "trial_params": null
+}
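The state above reflects early stopping on eval_loss: the best value (0.5533) came at epoch 3 (checkpoint-102), the patience counter then reached 5 by epoch 8, and training stopped well short of the configured 50 epochs (max_steps 1700). Below is a reconstruction sketch of a Trainer setup consistent with this state; hyperparameters such as the 5e-05 starting learning rate are inferred from the log, and the base model is assumed to load with the standard Auto classes.

```python
# Reconstruction sketch of a training setup consistent with trainer_state.json
# (inferred, not the original training script from this repo).
from datasets import load_dataset
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    EarlyStoppingCallback,
    Trainer,
    TrainingArguments,
)

base = "gokulsrinivasagan/bert_tiny_lda_5_v1_book"
tokenizer = AutoTokenizer.from_pretrained(base)
# May need trust_remote_code=True if the LDA variant uses a custom architecture.
model = AutoModelForSequenceClassification.from_pretrained(base, num_labels=2)

cola = load_dataset("glue", "cola")
cola = cola.map(lambda batch: tokenizer(batch["sentence"], truncation=True), batched=True)

args = TrainingArguments(
    output_dir="bert_tiny_lda_5_v1_book_cola",
    num_train_epochs=50,                # max_steps 1700 = 50 epochs x 34 steps/epoch
    per_device_train_batch_size=256,    # "train_batch_size": 256 in the state
    learning_rate=5e-05,                # linear decay matches 4.9e-05 ... 4.2e-05 in the log
    eval_strategy="epoch",              # "evaluation_strategy" on older transformers releases
    save_strategy="epoch",
    logging_strategy="epoch",
    load_best_model_at_end=True,
    metric_for_best_model="eval_loss",  # best_metric 0.5533 at checkpoint-102 (epoch 3)
    greater_is_better=False,
)

trainer = Trainer(
    model=model,
    args=args,
    train_dataset=cola["train"],
    eval_dataset=cola["validation"],
    tokenizer=tokenizer,                # enables dynamic padding via DataCollatorWithPadding
    # Stops once eval_loss fails to improve for 5 consecutive evaluations,
    # matching the patience counter of 5 and the stop at epoch 8 of 50.
    callbacks=[EarlyStoppingCallback(early_stopping_patience=5)],
)
# trainer.train()
```

The per-epoch matthews_correlation and accuracy entries in log_history would come from a compute_metrics hook like the one sketched after eval_results.json above.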