Safetensors
bert
YYama0 committed on
Commit
66a38ac
·
verified ·
1 Parent(s): e11d7a8

Upload 10 files

Browse files
config.json ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "tohoku-nlp/bert-base-japanese-v3",
3
+ "architectures": [
4
+ "BertForSequenceClassification"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.1,
7
+ "classifier_dropout": null,
8
+ "hidden_act": "gelu",
9
+ "hidden_dropout_prob": 0.1,
10
+ "hidden_size": 768,
11
+ "id2label": {
12
+ "0": "Medical material",
13
+ "1": "Arterial wall calcification",
14
+ "2": "Cardiomegaly",
15
+ "3": "Pericardial effusion",
16
+ "4": "Coronary artery wall calcification",
17
+ "5": "Hiatal hernia",
18
+ "6": "Lymphadenopathy",
19
+ "7": "Emphysema",
20
+ "8": "Atelectasis",
21
+ "9": "Lung nodule",
22
+ "10": "Lung opacity",
23
+ "11": "Pulmonary fibrotic sequela",
24
+ "12": "Pleural effusion",
25
+ "13": "Mosaic attenuation pattern",
26
+ "14": "Peribronchial thickening",
27
+ "15": "Consolidation",
28
+ "16": "Bronchiectasis",
29
+ "17": "Interlobular septal thickening"
30
+ },
31
+ "initializer_range": 0.02,
32
+ "intermediate_size": 3072,
33
+ "label2id": {
34
+ "Arterial wall calcification": 1,
35
+ "Atelectasis": 8,
36
+ "Bronchiectasis": 16,
37
+ "Cardiomegaly": 2,
38
+ "Consolidation": 15,
39
+ "Coronary artery wall calcification": 4,
40
+ "Emphysema": 7,
41
+ "Hiatal hernia": 5,
42
+ "Interlobular septal thickening": 17,
43
+ "Lung nodule": 9,
44
+ "Lung opacity": 10,
45
+ "Lymphadenopathy": 6,
46
+ "Medical material": 0,
47
+ "Mosaic attenuation pattern": 13,
48
+ "Peribronchial thickening": 14,
49
+ "Pericardial effusion": 3,
50
+ "Pleural effusion": 12,
51
+ "Pulmonary fibrotic sequela": 11
52
+ },
53
+ "layer_norm_eps": 1e-12,
54
+ "max_position_embeddings": 512,
55
+ "model_type": "bert",
56
+ "num_attention_heads": 12,
57
+ "num_hidden_layers": 12,
58
+ "pad_token_id": 0,
59
+ "position_embedding_type": "absolute",
60
+ "problem_type": "multi_label_classification",
61
+ "torch_dtype": "float32",
62
+ "transformers_version": "4.46.2",
63
+ "type_vocab_size": 2,
64
+ "use_cache": true,
65
+ "vocab_size": 32768
66
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0c03d670a74a69342b52c25f09184ad9cbe0bc69c3cf08c28c5cc91b364c0f6e
3
+ size 444907584
optimizer.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:efcb7768134397b6becaf8b3e7da8a42ba5d5bd239c386ef03cd693d178b332c
3
+ size 889936250
rng_state.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e93dd237ca61665515433c6519f2ad2c8ed6c85d0ea47ffadb3f5f5b9d628abd
3
+ size 14244
scheduler.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0e0a9fa61a05680362b84180a4051612f097fdec16acbb1748ba3b37d829e979
3
+ size 1064
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "cls_token": "[CLS]",
3
+ "mask_token": "[MASK]",
4
+ "pad_token": "[PAD]",
5
+ "sep_token": "[SEP]",
6
+ "unk_token": "[UNK]"
7
+ }
tokenizer_config.json ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "added_tokens_decoder": {
3
+ "0": {
4
+ "content": "[PAD]",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "1": {
12
+ "content": "[UNK]",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "2": {
20
+ "content": "[CLS]",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "3": {
28
+ "content": "[SEP]",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "4": {
36
+ "content": "[MASK]",
37
+ "lstrip": false,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ }
43
+ },
44
+ "clean_up_tokenization_spaces": false,
45
+ "cls_token": "[CLS]",
46
+ "do_lower_case": false,
47
+ "do_subword_tokenize": true,
48
+ "do_word_tokenize": true,
49
+ "jumanpp_kwargs": null,
50
+ "mask_token": "[MASK]",
51
+ "mecab_kwargs": {
52
+ "mecab_dic": "unidic_lite"
53
+ },
54
+ "model_max_length": 512,
55
+ "never_split": null,
56
+ "pad_token": "[PAD]",
57
+ "sep_token": "[SEP]",
58
+ "subword_tokenizer_type": "wordpiece",
59
+ "sudachi_kwargs": null,
60
+ "tokenizer_class": "BertJapaneseTokenizer",
61
+ "unk_token": "[UNK]",
62
+ "word_tokenizer_type": "mecab"
63
+ }
trainer_state.json ADDED
@@ -0,0 +1,207 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": 0.058695483952760696,
3
+ "best_model_checkpoint": "/content/drive/MyDrive/dataset_for_research/ct_rate/data/ct_rate_jpn/model_output/tohoku_bert/checkpoint-9112",
4
+ "epoch": 4.0,
5
+ "eval_steps": 500,
6
+ "global_step": 9112,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.21949078138718173,
13
+ "grad_norm": 1.1410490274429321,
14
+ "learning_rate": 1.8902546093064093e-05,
15
+ "loss": 0.3733,
16
+ "step": 500
17
+ },
18
+ {
19
+ "epoch": 0.43898156277436345,
20
+ "grad_norm": 0.5691537857055664,
21
+ "learning_rate": 1.7805092186128183e-05,
22
+ "loss": 0.2073,
23
+ "step": 1000
24
+ },
25
+ {
26
+ "epoch": 0.6584723441615452,
27
+ "grad_norm": 0.36492520570755005,
28
+ "learning_rate": 1.6707638279192274e-05,
29
+ "loss": 0.1498,
30
+ "step": 1500
31
+ },
32
+ {
33
+ "epoch": 0.8779631255487269,
34
+ "grad_norm": 0.948626697063446,
35
+ "learning_rate": 1.561018437225637e-05,
36
+ "loss": 0.1187,
37
+ "step": 2000
38
+ },
39
+ {
40
+ "epoch": 1.0,
41
+ "eval_accuracy": 0.9789288849868305,
42
+ "eval_f1": 0.9471656576774904,
43
+ "eval_loss": 0.09360361844301224,
44
+ "eval_precision": 0.9523487456960157,
45
+ "eval_recall": 0.9420386814256173,
46
+ "eval_runtime": 54.2042,
47
+ "eval_samples_per_second": 84.053,
48
+ "eval_steps_per_second": 10.516,
49
+ "step": 2278
50
+ },
51
+ {
52
+ "epoch": 1.0974539069359086,
53
+ "grad_norm": 0.35782086849212646,
54
+ "learning_rate": 1.4512730465320458e-05,
55
+ "loss": 0.0979,
56
+ "step": 2500
57
+ },
58
+ {
59
+ "epoch": 1.3169446883230904,
60
+ "grad_norm": 0.49497029185295105,
61
+ "learning_rate": 1.3415276558384549e-05,
62
+ "loss": 0.0849,
63
+ "step": 3000
64
+ },
65
+ {
66
+ "epoch": 1.536435469710272,
67
+ "grad_norm": 0.45342519879341125,
68
+ "learning_rate": 1.2317822651448641e-05,
69
+ "loss": 0.0756,
70
+ "step": 3500
71
+ },
72
+ {
73
+ "epoch": 1.755926251097454,
74
+ "grad_norm": 0.5025061368942261,
75
+ "learning_rate": 1.122036874451273e-05,
76
+ "loss": 0.0719,
77
+ "step": 4000
78
+ },
79
+ {
80
+ "epoch": 1.9754170324846356,
81
+ "grad_norm": 0.8253716826438904,
82
+ "learning_rate": 1.0122914837576823e-05,
83
+ "loss": 0.0687,
84
+ "step": 4500
85
+ },
86
+ {
87
+ "epoch": 2.0,
88
+ "eval_accuracy": 0.9834162520729685,
89
+ "eval_f1": 0.9583103427135062,
90
+ "eval_loss": 0.06783699989318848,
91
+ "eval_precision": 0.9660692212608158,
92
+ "eval_recall": 0.9506751003527552,
93
+ "eval_runtime": 54.005,
94
+ "eval_samples_per_second": 84.362,
95
+ "eval_steps_per_second": 10.555,
96
+ "step": 4556
97
+ },
98
+ {
99
+ "epoch": 2.194907813871817,
100
+ "grad_norm": 1.1863067150115967,
101
+ "learning_rate": 9.025460930640914e-06,
102
+ "loss": 0.0585,
103
+ "step": 5000
104
+ },
105
+ {
106
+ "epoch": 2.4143985952589992,
107
+ "grad_norm": 0.6489052176475525,
108
+ "learning_rate": 7.928007023705005e-06,
109
+ "loss": 0.0573,
110
+ "step": 5500
111
+ },
112
+ {
113
+ "epoch": 2.633889376646181,
114
+ "grad_norm": 0.8411669731140137,
115
+ "learning_rate": 6.830553116769097e-06,
116
+ "loss": 0.0534,
117
+ "step": 6000
118
+ },
119
+ {
120
+ "epoch": 2.853380158033363,
121
+ "grad_norm": 1.2083945274353027,
122
+ "learning_rate": 5.7330992098331876e-06,
123
+ "loss": 0.0528,
124
+ "step": 6500
125
+ },
126
+ {
127
+ "epoch": 3.0,
128
+ "eval_accuracy": 0.9838796215003415,
129
+ "eval_f1": 0.959547123623011,
130
+ "eval_loss": 0.06107071042060852,
131
+ "eval_precision": 0.9655745781500185,
132
+ "eval_recall": 0.9535944532295341,
133
+ "eval_runtime": 54.1061,
134
+ "eval_samples_per_second": 84.205,
135
+ "eval_steps_per_second": 10.535,
136
+ "step": 6834
137
+ },
138
+ {
139
+ "epoch": 3.0728709394205445,
140
+ "grad_norm": 0.5386723279953003,
141
+ "learning_rate": 4.6356453028972785e-06,
142
+ "loss": 0.0496,
143
+ "step": 7000
144
+ },
145
+ {
146
+ "epoch": 3.292361720807726,
147
+ "grad_norm": 0.5504190921783447,
148
+ "learning_rate": 3.53819139596137e-06,
149
+ "loss": 0.0441,
150
+ "step": 7500
151
+ },
152
+ {
153
+ "epoch": 3.511852502194908,
154
+ "grad_norm": 0.8231956958770752,
155
+ "learning_rate": 2.440737489025461e-06,
156
+ "loss": 0.0456,
157
+ "step": 8000
158
+ },
159
+ {
160
+ "epoch": 3.7313432835820897,
161
+ "grad_norm": 1.272826910018921,
162
+ "learning_rate": 1.3432835820895524e-06,
163
+ "loss": 0.0447,
164
+ "step": 8500
165
+ },
166
+ {
167
+ "epoch": 3.9508340649692713,
168
+ "grad_norm": 1.6443417072296143,
169
+ "learning_rate": 2.458296751536436e-07,
170
+ "loss": 0.0434,
171
+ "step": 9000
172
+ },
173
+ {
174
+ "epoch": 4.0,
175
+ "eval_accuracy": 0.9843429909277144,
176
+ "eval_f1": 0.9607147228001469,
177
+ "eval_loss": 0.058695483952760696,
178
+ "eval_precision": 0.9666297254032754,
179
+ "eval_recall": 0.9548716701131249,
180
+ "eval_runtime": 54.169,
181
+ "eval_samples_per_second": 84.107,
182
+ "eval_steps_per_second": 10.523,
183
+ "step": 9112
184
+ }
185
+ ],
186
+ "logging_steps": 500,
187
+ "max_steps": 9112,
188
+ "num_input_tokens_seen": 0,
189
+ "num_train_epochs": 4,
190
+ "save_steps": 500,
191
+ "stateful_callbacks": {
192
+ "TrainerControl": {
193
+ "args": {
194
+ "should_epoch_stop": false,
195
+ "should_evaluate": false,
196
+ "should_log": false,
197
+ "should_save": true,
198
+ "should_training_stop": true
199
+ },
200
+ "attributes": {}
201
+ }
202
+ },
203
+ "total_flos": 1.755253733461956e+16,
204
+ "train_batch_size": 8,
205
+ "trial_name": null,
206
+ "trial_params": null
207
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:da57d8f605f6266d3453037fec717cdad2f16c7446ee5ccf25d1dc2f6821de1c
3
+ size 5432
vocab.txt ADDED
The diff for this file is too large to render. See raw diff