griffio committed
Commit 5802e49 · verified · 1 Parent(s): 024d524

dungeon-geo-morphs

README.md CHANGED
@@ -3,6 +3,7 @@ library_name: transformers
  license: apache-2.0
  base_model: google/vit-large-patch16-224
  tags:
+ - image-classification
  - generated_from_trainer
  datasets:
  - imagefolder
@@ -15,7 +16,7 @@ model-index:
  name: Image Classification
  type: image-classification
  dataset:
- name: imagefolder
+ name: dungeon-geo-morphs
  type: imagefolder
  config: default
  split: validation
@@ -31,9 +32,9 @@ should probably proofread and complete it, then remove this comment. -->

  # vit-large-patch16-224-dungeon-geo-morphs-0-4-30Nov24-012

- This model is a fine-tuned version of [google/vit-large-patch16-224](https://huggingface.co/google/vit-large-patch16-224) on the imagefolder dataset.
+ This model is a fine-tuned version of [google/vit-large-patch16-224](https://huggingface.co/google/vit-large-patch16-224) on the dungeon-geo-morphs dataset.
  It achieves the following results on the evaluation set:
- - Loss: 0.0681
+ - Loss: 0.0763
  - Accuracy: 0.9929

  ## Model description
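For context, a minimal inference sketch against the updated model card. The repo id (committer name plus the model name from the heading) and the image path are assumptions, not part of this commit.

```python
from transformers import pipeline

# Assumed repo id: committer "griffio" plus the model name from the README heading.
model_id = "griffio/vit-large-patch16-224-dungeon-geo-morphs-0-4-30Nov24-012"

# Standard transformers image-classification pipeline.
classifier = pipeline("image-classification", model=model_id)

# Placeholder image path; any PIL-readable file works.
for prediction in classifier("dungeon_tile.png"):
    print(f"{prediction['label']}: {prediction['score']:.4f}")
```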
all_results.json ADDED
@@ -0,0 +1,13 @@
+ {
+ "epoch": 23.90909090909091,
+ "eval_accuracy": 0.9928571428571429,
+ "eval_loss": 0.07632029056549072,
+ "eval_runtime": 7.7287,
+ "eval_samples_per_second": 72.457,
+ "eval_steps_per_second": 9.057,
+ "total_flos": 5.259589576438579e+17,
+ "train_loss": 0.39049412111441295,
+ "train_runtime": 294.5306,
+ "train_samples_per_second": 8.352,
+ "train_steps_per_second": 0.204
+ }
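Since the summary is plain JSON, the headline metrics can be read directly; a minimal sketch, assuming the file is read from the repository root. all_results.json is simply the union of the eval_results.json and train_results.json files added below.

```python
import json

# Load the end-of-run summary written by the Trainer.
with open("all_results.json") as f:
    results = json.load(f)

print(f"eval accuracy: {results['eval_accuracy']:.4f}")
print(f"eval loss:     {results['eval_loss']:.4f}")
print(f"train loss:    {results['train_loss']:.4f}")
```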
eval_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 23.90909090909091,
+ "eval_accuracy": 0.9928571428571429,
+ "eval_loss": 0.07632029056549072,
+ "eval_runtime": 7.7287,
+ "eval_samples_per_second": 72.457,
+ "eval_steps_per_second": 9.057
+ }
runs/Nov30_17-54-31_c9f227a2e97a/events.out.tfevents.1732989637.c9f227a2e97a.1042.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ffd33cdd2aab0a60d8ec6f251e381db52fd150e639475f2bdbcfa42840eef9eb
+ size 405
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 23.90909090909091,
+ "total_flos": 5.259589576438579e+17,
+ "train_loss": 0.39049412111441295,
+ "train_runtime": 294.5306,
+ "train_samples_per_second": 8.352,
+ "train_steps_per_second": 0.204
+ }
trainer_state.json ADDED
@@ -0,0 +1,138 @@
+ {
+ "best_metric": 0.9928571428571429,
+ "best_model_checkpoint": "vit-large-patch16-224-dungeon-geo-morphs-0-4-30Nov24-012/checkpoint-50",
+ "epoch": 23.90909090909091,
+ "eval_steps": 10,
+ "global_step": 60,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 3.909090909090909,
+ "grad_norm": 22.11748695373535,
+ "learning_rate": 1.888888888888889e-05,
+ "loss": 1.4699,
+ "step": 10
+ },
+ {
+ "epoch": 3.909090909090909,
+ "eval_accuracy": 0.7821428571428571,
+ "eval_loss": 0.9471383094787598,
+ "eval_runtime": 7.2068,
+ "eval_samples_per_second": 77.704,
+ "eval_steps_per_second": 9.713,
+ "step": 10
+ },
+ {
+ "epoch": 7.909090909090909,
+ "grad_norm": 14.252605438232422,
+ "learning_rate": 1.5185185185185187e-05,
+ "loss": 0.5263,
+ "step": 20
+ },
+ {
+ "epoch": 7.909090909090909,
+ "eval_accuracy": 0.9214285714285714,
+ "eval_loss": 0.37957894802093506,
+ "eval_runtime": 7.8394,
+ "eval_samples_per_second": 71.434,
+ "eval_steps_per_second": 8.929,
+ "step": 20
+ },
+ {
+ "epoch": 11.909090909090908,
+ "grad_norm": 18.25510597229004,
+ "learning_rate": 1.1481481481481482e-05,
+ "loss": 0.1867,
+ "step": 30
+ },
+ {
+ "epoch": 11.909090909090908,
+ "eval_accuracy": 0.9357142857142857,
+ "eval_loss": 0.24581122398376465,
+ "eval_runtime": 7.5991,
+ "eval_samples_per_second": 73.693,
+ "eval_steps_per_second": 9.212,
+ "step": 30
+ },
+ {
+ "epoch": 15.909090909090908,
+ "grad_norm": 16.120893478393555,
+ "learning_rate": 7.77777777777778e-06,
+ "loss": 0.0908,
+ "step": 40
+ },
+ {
+ "epoch": 15.909090909090908,
+ "eval_accuracy": 0.9857142857142858,
+ "eval_loss": 0.1266908347606659,
+ "eval_runtime": 7.4977,
+ "eval_samples_per_second": 74.69,
+ "eval_steps_per_second": 9.336,
+ "step": 40
+ },
+ {
+ "epoch": 19.90909090909091,
+ "grad_norm": 4.964926719665527,
+ "learning_rate": 4.074074074074074e-06,
+ "loss": 0.0436,
+ "step": 50
+ },
+ {
+ "epoch": 19.90909090909091,
+ "eval_accuracy": 0.9928571428571429,
+ "eval_loss": 0.07632029056549072,
+ "eval_runtime": 7.2797,
+ "eval_samples_per_second": 76.926,
+ "eval_steps_per_second": 9.616,
+ "step": 50
+ },
+ {
+ "epoch": 23.90909090909091,
+ "grad_norm": 2.2112691402435303,
+ "learning_rate": 3.7037037037037036e-07,
+ "loss": 0.0256,
+ "step": 60
+ },
+ {
+ "epoch": 23.90909090909091,
+ "eval_accuracy": 0.9928571428571429,
+ "eval_loss": 0.0681236982345581,
+ "eval_runtime": 8.3628,
+ "eval_samples_per_second": 66.964,
+ "eval_steps_per_second": 8.37,
+ "step": 60
+ },
+ {
+ "epoch": 23.90909090909091,
+ "step": 60,
+ "total_flos": 5.259589576438579e+17,
+ "train_loss": 0.39049412111441295,
+ "train_runtime": 294.5306,
+ "train_samples_per_second": 8.352,
+ "train_steps_per_second": 0.204
+ }
+ ],
+ "logging_steps": 10,
+ "max_steps": 60,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 30,
+ "save_steps": 10,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 5.259589576438579e+17,
+ "train_batch_size": 8,
+ "trial_name": null,
+ "trial_params": null
+ }
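The evaluation curve (accuracy climbing from 0.78 at step 10 to 0.99 by step 50) is recoverable from log_history; a short sketch, assuming the file is read from the repository root:

```python
import json

# Load the Trainer state saved with the final checkpoint.
with open("trainer_state.json") as f:
    state = json.load(f)

# log_history interleaves training entries (loss, learning_rate, grad_norm)
# with evaluation entries (eval_*); keep only the evaluation ones.
for entry in (e for e in state["log_history"] if "eval_accuracy" in e):
    print(f"step {entry['step']:>2}: "
          f"accuracy={entry['eval_accuracy']:.4f}  loss={entry['eval_loss']:.4f}")

print("best checkpoint:", state["best_model_checkpoint"])
```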