djsull committed · verified
Commit b56ef07 · 1 Parent(s): 4055e94

djsull/aha_classification

README.md CHANGED
@@ -1,5 +1,5 @@
 ---
-base_model: klue/roberta-small
+base_model: monologg/kobigbird-bert-base
 tags:
 - generated_from_trainer
 metrics:
@@ -15,9 +15,9 @@ should probably proofread and complete it, then remove this comment. -->
 
 # aha_classification
 
-This model is a fine-tuned version of [klue/roberta-small](https://huggingface.co/klue/roberta-small) on an unknown dataset.
+This model is a fine-tuned version of [monologg/kobigbird-bert-base](https://huggingface.co/monologg/kobigbird-bert-base) on an unknown dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.0295
+- Loss: 0.0276
 - F1: 1.0
 - Roc Auc: 1.0
 - Accuracy: 1.0
@@ -45,17 +45,22 @@ The following hyperparameters were used during training:
 - seed: 42
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: linear
-- num_epochs: 5
+- num_epochs: 15
 
 ### Training results
 
 | Training Loss | Epoch | Step | Validation Loss | F1     | Roc Auc | Accuracy |
 |:-------------:|:-----:|:----:|:---------------:|:------:|:-------:|:--------:|
-| No log        | 1.0   | 50   | 0.1202          | 0.9969 | 0.9981  | 0.9929   |
-| No log        | 2.0   | 100  | 0.0560          | 0.9969 | 0.9981  | 0.9929   |
-| No log        | 3.0   | 150  | 0.0381          | 0.9969 | 0.9981  | 0.9929   |
-| No log        | 4.0   | 200  | 0.0295          | 1.0    | 1.0     | 1.0      |
-| No log        | 5.0   | 250  | 0.0279          | 1.0    | 1.0     | 1.0      |
+| No log        | 1.0   | 50   | 0.2740          | 0.9906 | 0.9930  | 0.9857   |
+| No log        | 2.0   | 100  | 0.1222          | 0.9938 | 0.9962  | 0.9857   |
+| No log        | 3.0   | 150  | 0.0663          | 0.9969 | 0.9981  | 0.9929   |
+| No log        | 4.0   | 200  | 0.0423          | 0.9969 | 0.9981  | 0.9929   |
+| No log        | 5.0   | 250  | 0.0276          | 1.0    | 1.0     | 1.0      |
+| No log        | 6.0   | 300  | 0.0208          | 1.0    | 1.0     | 1.0      |
+| No log        | 7.0   | 350  | 0.0165          | 1.0    | 1.0     | 1.0      |
+| No log        | 8.0   | 400  | 0.0135          | 1.0    | 1.0     | 1.0      |
+| No log        | 9.0   | 450  | 0.0114          | 1.0    | 1.0     | 1.0      |
+| 0.1025        | 10.0  | 500  | 0.0101          | 1.0    | 1.0     | 1.0      |
 
 
 ### Framework versions
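For quick reference, a minimal inference sketch against the updated checkpoint. The repo id (`djsull/aha_classification`) and the `multi_label_classification` problem type come from this commit; the example sentence and the 0.5 decision threshold are illustrative assumptions, not part of the training setup.

```python
# Minimal inference sketch. Assumptions: repo id as in this commit, a sigmoid
# with a 0.5 threshold for the multi-label head, and a made-up example sentence.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

repo_id = "djsull/aha_classification"
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForSequenceClassification.from_pretrained(repo_id)

inputs = tokenizer("example input text", return_tensors="pt", truncation=True)
with torch.no_grad():
    logits = model(**inputs).logits

# problem_type is multi_label_classification, so score each label independently
probs = torch.sigmoid(logits)[0]
labels = [model.config.id2label[i] for i, p in enumerate(probs) if p > 0.5]
print(labels)
```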
config.json CHANGED
@@ -1,14 +1,16 @@
 {
-  "_name_or_path": "klue/roberta-small",
+  "_name_or_path": "monologg/kobigbird-bert-base",
   "architectures": [
-    "RobertaForSequenceClassification"
+    "BigBirdForSequenceClassification"
   ],
   "attention_probs_dropout_prob": 0.1,
-  "bos_token_id": 0,
+  "attention_type": "block_sparse",
+  "block_size": 64,
+  "bos_token_id": 5,
   "classifier_dropout": null,
-  "eos_token_id": 2,
+  "eos_token_id": 6,
   "gradient_checkpointing": false,
-  "hidden_act": "gelu",
+  "hidden_act": "gelu_new",
   "hidden_dropout_prob": 0.1,
   "hidden_size": 768,
   "id2label": {
@@ -23,18 +25,22 @@
     "Service": 1,
     "Token": 2
   },
-  "layer_norm_eps": 1e-05,
-  "max_position_embeddings": 514,
-  "model_type": "roberta",
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 4096,
+  "model_type": "big_bird",
   "num_attention_heads": 12,
-  "num_hidden_layers": 6,
-  "pad_token_id": 1,
+  "num_hidden_layers": 12,
+  "num_random_blocks": 3,
+  "pad_token_id": 0,
   "position_embedding_type": "absolute",
   "problem_type": "multi_label_classification",
+  "rescale_embeddings": false,
+  "sep_token_id": 3,
   "tokenizer_class": "BertTokenizer",
   "torch_dtype": "float32",
   "transformers_version": "4.42.3",
-  "type_vocab_size": 1,
+  "type_vocab_size": 2,
+  "use_bias": true,
   "use_cache": true,
-  "vocab_size": 32000
+  "vocab_size": 32500
 }
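The backbone now uses BigBird's block-sparse attention (`attention_type: "block_sparse"`, `block_size: 64`, `num_random_blocks: 3`) with a 4096-token context. Below is a small sketch for inspecting these fields; the `original_full` override at the end is a standard option of the `transformers` BigBird config for short inputs, not something introduced by this commit.

```python
# Sketch: inspect the BigBird settings added in this commit and, optionally,
# load the model with full (non-sparse) attention for short sequences.
from transformers import AutoConfig, AutoModelForSequenceClassification

config = AutoConfig.from_pretrained("djsull/aha_classification")
print(config.attention_type, config.block_size, config.num_random_blocks)
print(config.max_position_embeddings)  # 4096 with kobigbird-bert-base

# Block-sparse attention mainly pays off for long inputs; for short ones the
# checkpoint can instead be loaded with regular full attention.
model = AutoModelForSequenceClassification.from_pretrained(
    "djsull/aha_classification", attention_type="original_full"
)
```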
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f1cbc87f7b18c502cedd4170272203324299be81cb3b60691d95a85ad056584d
-size 272385276
+oid sha256:207c52dc2f1fc52daae93ae96eb14ef124134f04f63c722e0bd4fad404ad3291
+size 457410764
runs/Sep04_08-20-40_129-146-79-76/events.out.tfevents.1725438041.129-146-79-76.38666.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a7552b0cee9f13412ba2cbddeed579b6d1a15036e53c1a4230b6a83fbb0719d8
+size 7753
runs/Sep04_08-25-35_129-146-79-76/events.out.tfevents.1725438336.129-146-79-76.40588.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:40bb83a52b5a9f6439e78752fb9a81dfb89f3ad1c574bd0afd04beb29293d25f
+size 10065
runs/Sep04_08-25-35_129-146-79-76/events.out.tfevents.1725440642.129-146-79-76.40588.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3ebff5958056011d0130353e299f4fc3a603805186d1985490172e8ba5d62dac
+size 508
special_tokens_map.json CHANGED
@@ -1,6 +1,6 @@
 {
   "bos_token": {
-    "content": "[CLS]",
+    "content": "<s>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
@@ -14,7 +14,7 @@
     "single_word": false
   },
   "eos_token": {
-    "content": "[SEP]",
+    "content": "</s>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1,7 +1,7 @@
 {
   "added_tokens_decoder": {
     "0": {
-      "content": "[CLS]",
+      "content": "[PAD]",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
@@ -9,7 +9,7 @@
       "special": true
     },
     "1": {
-      "content": "[PAD]",
+      "content": "[UNK]",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
@@ -17,7 +17,7 @@
       "special": true
     },
     "2": {
-      "content": "[SEP]",
+      "content": "[CLS]",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
@@ -25,7 +25,7 @@
       "special": true
     },
     "3": {
-      "content": "[UNK]",
+      "content": "[SEP]",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
@@ -39,16 +39,32 @@
       "rstrip": false,
       "single_word": false,
       "special": true
+    },
+    "5": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "6": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
     }
   },
-  "bos_token": "[CLS]",
+  "bos_token": "<s>",
   "clean_up_tokenization_spaces": true,
   "cls_token": "[CLS]",
   "do_basic_tokenize": true,
   "do_lower_case": false,
-  "eos_token": "[SEP]",
+  "eos_token": "</s>",
   "mask_token": "[MASK]",
-  "model_max_length": 512,
+  "model_max_length": 4096,
   "never_split": null,
   "pad_token": "[PAD]",
   "sep_token": "[SEP]",
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:07f22ec80f5c53070fab174cfe020f957d287267c08715a5a64dd2d369202243
+oid sha256:66a111869a1d1fadae090c0ce031ee7add201d05bad5866735b012ba523213ce
 size 5112
vocab.txt CHANGED
The diff for this file is too large to render. See raw diff