"""CC6204-Hackaton-Cub-Dataset: Multimodal"""
import os
import re
import datasets

import pandas as pd

from requests import get

logger = datasets.logging.get_logger(__name__)
datasets.logging.set_verbosity_info()


_CITATION = "XYZ"
_HOMEPAGE = "https://github.com/ivansipiran/CC6204-Deep-Learning/blob/main/Hackaton/hackaton.md"

# Short summary used below in DatasetInfo(description=_DESCRIPTION)
_DESCRIPTION = "CC6204-Hackaton-Cub-Dataset: multimodal (images and text) dataset for the CC6204 Hackaton."

_REPO = "https://huggingface.co/datasets/alkzar90/CC6204-Hackaton-Cub-Dataset/resolve/main/data"

_URLS = {
   "train_test_split": f"{_REPO}/train_test_split.txt",
   "classes": f"{_REPO}/classes.txt",
   "image_class_labels": f"{_REPO}/image_class_labels.txt",
   "images": f"{_REPO}/images.txt",
   "image_urls": f"{_REPO}/images.zip",
   "text_urls": f"{_REPO}/text.zip",
}
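
# The metadata files above are assumed to follow the standard CUB-200-2011
# layout, one space-separated record per line:
#   classes.txt            -> "<class_id> <class_name>"
#   images.txt             -> "<image_id> <relative/path/to/image.jpg>"
#   image_class_labels.txt -> "<image_id> <class_id>"
#   train_test_split.txt   -> "<image_id> <is_training_image>"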

# Create id-to-label dictionary using the classes file
classes = get(_URLS["classes"]).iter_lines()
logger.info(f"classes: {classes}")

_ID2LABEL = {}
for row in classes:
   row = row.decode("UTF8")
   if row != "":
      idx, label = row.split(" ")
      _ID2LABEL[int(idx)] = re.search(r"[^\d\._+].+", label).group(0).replace("_", " ")
      
logger.info(f"_ID2LABEL: {_ID2LABEL}")

_NAMES = list(_ID2LABEL.values())
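
# For illustration, assuming the layout above: the classes.txt line
# "1 001.Black_footed_Albatross" yields _ID2LABEL[1] == "Black footed Albatross".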

# build from images.txt: a mapping from image_file_name -> id
imgpath_to_ids = get(_URLS["images"]).iter_lines()
_IMGNAME2ID = {}
for row in imgpath_to_ids:
   row = row.decode("UTF8")
   if row != "":
      idx, img_name = row.split(" ")
      # Key by the bare file name so it matches the os.path.basename() lookups below
      _IMGNAME2ID[os.path.basename(img_name)] = int(idx)
   


class CubDataset(datasets.GeneratorBasedBuilder):
   """Cub Dataset"""
   
   def _info(self):
      features = datasets.Features({
         "image": datasets.Image(),
         "labels": datasets.features.ClassLabel(names=_NAMES),
      })
      keys = ("image", "labels")
      
      return datasets.DatasetInfo(
         description=_DESCRIPTION,
         features=features,
         supervised_keys=keys,
         homepage=_HOMEPAGE,
         citation=_CITATION,
      )
      
      
   def _split_generators(self, dl_manager):
      # train_test_split.txt flags: 1 = train, 0 = test
      train_test_split = get(_URLS["train_test_split"]).iter_lines()
      train_images_idx = set([int(x.decode("UTF8").split(" ")[0]) for x in train_test_split if x.decode("UTF8").split(" ")[1] == "1"])
      logger.info(f"train_images_idx length: {len(train_images_idx)}")

      train_files = []
      test_files = []

      # Download and extract the image archive, then walk the extracted files
      data_dir = dl_manager.download_and_extract(_URLS["image_urls"])

      for img in dl_manager.iter_files(data_dir):
         if _IMGNAME2ID[os.path.basename(img)] in train_images_idx:
            train_files.append(img)
         else:
            test_files.append(img)
               
      return [
         datasets.SplitGenerator(
            name=datasets.Split.TRAIN,
            gen_kwargs={"files": train_files},
         ),
         datasets.SplitGenerator(
            name=datasets.Split.TEST,
            gen_kwargs={"files": test_files},
         ),
      ]
      
      
   def _generate_examples(self, files):
      for i, path in enumerate(files):
         file_name = os.path.basename(path)
         if file_name.endswith(".jpg"):
            # Folder names look like "001.Black_footed_Albatross"; strip the numeric
            # prefix and map underscores to spaces so the label matches _NAMES
            folder_name = os.path.basename(os.path.dirname(path))
            label = re.search(r"[^\d\._+].+", folder_name).group(0).replace("_", " ")
            yield i, {
               "image": path,
               "labels": label,
            }
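

# Minimal usage sketch, assuming this script is hosted on the Hub under the
# repo id "alkzar90/CC6204-Hackaton-Cub-Dataset" (inferred from _REPO above).
if __name__ == "__main__":
   from datasets import load_dataset

   ds = load_dataset("alkzar90/CC6204-Hackaton-Cub-Dataset")
   sample = ds["train"][0]
   # "labels" is a ClassLabel, so the stored value is an integer class id
   print(sample["image"], ds["train"].features["labels"].int2str(sample["labels"]))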