sanali209 committed on
Commit 4732b36 · 1 Parent(s): 641bb49

commit files to HF hub

README.md CHANGED
@@ -1,60 +1,31 @@
  ---
  tags:
- - generated_from_trainer
- datasets:
- - imagefolder
+ - image-classification
+ - pytorch
+ - huggingpics
+ metrics:
+ - accuracy
+
  model-index:
- - name: imclasif-genres-v001
-   results: []
+ - name: sanali209/imclasif-genres-v001
+   results:
+   - task:
+       name: Image Classification
+       type: image-classification
+     metrics:
+     - name: Accuracy
+       type: accuracy
+       value: 0.8868578672409058
  ---

- <!-- This model card has been generated automatically according to the information the Trainer had access to. You
- should probably proofread and complete it, then remove this comment. -->
-
- # imclasif-genres-v001
-
- This model was trained from scratch on the imagefolder dataset.
- It achieves the following results on the evaluation set:
- - Loss: 1.0340
-
- ## Model description
-
- More information needed
-
- ## Intended uses & limitations
-
- More information needed
-
- ## Training and evaluation data
-
- More information needed
-
- ## Training procedure
-
- ### Training hyperparameters
-
- The following hyperparameters were used during training:
- - learning_rate: 2e-05
- - train_batch_size: 32
- - eval_batch_size: 16
- - seed: 42
- - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- - lr_scheduler_type: linear
- - num_epochs: 4
-
- ### Training results
-
- | Training Loss | Epoch | Step | Validation Loss |
- |:-------------:|:-----:|:----:|:---------------:|
- | No log | 1.0 | 54 | 1.2259 |
- | No log | 2.0 | 108 | 1.0907 |
- | No log | 3.0 | 162 | 1.0382 |
- | No log | 4.0 | 216 | 1.0340 |
-
-
- ### Framework versions
-
- - Transformers 4.44.0
- - Pytorch 2.3.1+cu121
- - Datasets 2.20.0
- - Tokenizers 0.19.1
+ # sanali209/imclasif-genres-v001
+
+
+ Autogenerated by HuggingPics🤗🖼️
+
+ Create your own image classifier for **anything** by running [the demo on Google Colab](https://colab.research.google.com/github/nateraw/huggingpics/blob/main/HuggingPics.ipynb).
+
+ Report any issues with the demo at the [github repo](https://github.com/nateraw/huggingpics).
+
+
+ ## Example Images
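
For reference, a minimal inference sketch against the card above, assuming the standard `transformers` image-classification pipeline and the Hub id `sanali209/imclasif-genres-v001`; the image path is a placeholder and the snippet is not part of this commit.

```python
# Illustrative only (not part of this commit): run the classifier described by the
# updated card via the transformers image-classification pipeline.
from transformers import pipeline

# The model id comes from the card above; loading it pulls the files in this commit.
classifier = pipeline("image-classification", model="sanali209/imclasif-genres-v001")

# "example.jpg" is a placeholder; the pipeline also accepts URLs or PIL images.
for pred in classifier("example.jpg"):
    print(f"{pred['label']}: {pred['score']:.3f}")  # labels come from id2label in config.json
```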
 
 
 
config.json CHANGED
@@ -1,5 +1,5 @@
  {
-   "_name_or_path": "sanali209/imclasif-genres-v001",
+   "_name_or_path": "google/vit-base-patch16-224",
    "architectures": [
      "ViTForImageClassification"
    ],
@@ -9,7 +9,7 @@
    "hidden_dropout_prob": 0.0,
    "hidden_size": 768,
    "id2label": {
-     "0": "3d renderer",
+     "0": "3d render",
      "1": "combined",
      "2": "drawing",
      "3": "other",
@@ -21,13 +21,13 @@
    "initializer_range": 0.02,
    "intermediate_size": 3072,
    "label2id": {
-     "3d renderer": 0,
-     "combined": 1,
-     "drawing": 2,
-     "other": 3,
-     "photo": 4,
-     "pixel art": 5,
-     "text": 6
+     "3d render": "0",
+     "combined": "1",
+     "drawing": "2",
+     "other": "3",
+     "photo": "4",
+     "pixel art": "5",
+     "text": "6"
    },
    "layer_norm_eps": 1e-12,
    "model_type": "vit",
@@ -38,5 +38,5 @@
    "problem_type": "single_label_classification",
    "qkv_bias": true,
    "torch_dtype": "float32",
-   "transformers_version": "4.44.0"
+   "transformers_version": "4.44.2"
  }
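
The hunks above rename the `3d renderer` class to `3d render` and reset `_name_or_path` to the base checkpoint. A small sketch of verifying the relabeling from the Hub follows; it assumes `AutoConfig` from transformers, and pinning `revision` to this commit hash is an assumption about how you would fetch exactly this version.

```python
# Illustrative check (not part of the commit): load the updated config and confirm
# the relabeled class. AutoConfig reads id2label/label2id from config.json.
from transformers import AutoConfig

config = AutoConfig.from_pretrained(
    "sanali209/imclasif-genres-v001",
    revision="4732b36",  # assumed way to pin this commit; omit to get the latest revision
)
print(config.id2label[0])       # "3d render" after this commit (previously "3d renderer")
print(sorted(config.label2id))  # all seven genre labels
```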
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:69ac4d625b25aa1a8ba203eb98bc93ebbf39817ad9d6a8731aea04b6c4d1bb9f
+ oid sha256:f0eb214f123dc7f948df976691ef970519c0a3957962520ceba6a6e79f125d19
  size 343239356
preprocessor_config.json CHANGED
@@ -7,7 +7,7 @@
      0.5,
      0.5
    ],
-   "image_processor_type": "ViTImageProcessor",
+   "image_processor_type": "ViTFeatureExtractor",
    "image_std": [
      0.5,
      0.5,
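
The change above switches `image_processor_type` back to the older `ViTFeatureExtractor` name. A brief sketch, assuming a recent transformers release where `AutoImageProcessor` resolves the legacy name to the current ViT image processor:

```python
# Illustrative only: AutoImageProcessor reads preprocessor_config.json and maps the
# legacy ViTFeatureExtractor name onto the current ViT image processor class.
from transformers import AutoImageProcessor

processor = AutoImageProcessor.from_pretrained("sanali209/imclasif-genres-v001")
print(type(processor).__name__)  # expected: a ViT image processor despite the legacy name
```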
runs/events.out.tfevents.1729920614.6934cbcb2c82.1901.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b3550c215f4ff8006c51ccd058ea9a012ffa25cc8f1e1f6b86e40a9b7fab6495
+ size 17584