jmc255 committed on
Commit 478fb4a · 1 Parent(s): 6381d83
Files changed (1)
  1. aphantasia_drawing_dataset.py +192 -0
aphantasia_drawing_dataset.py ADDED
@@ -0,0 +1,192 @@
+ # -*- coding: utf-8 -*-
+ """aphantasia_drawing_dataset.ipynb
+
+ Automatically generated by Colaboratory.
+
+ Original file is located at
+     https://colab.research.google.com/drive/1FHMQJWfjSzSrtEpARqh7IKVrfF7LwyRA
+ """
+
+
+
+
+
+
+
+ #!pip install datasets -q
+ #from google.colab import drive
+ #drive.mount('/content/drive')
+ #path = os.getcwd() + "/drive/MyDrive/Duke/huggingface_project/aphantasia_drawing"
+
+
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # TODO: Address all TODOs and remove all explanatory comments
+ """Aphantasia drawing dataset: memory and perception drawings from aphantasic and control participants."""
+
+
+ #import csv
+ import base64
+ from PIL import Image
+ import io
+ import json
+ import os
+ from typing import List
+ import datasets
+ import logging
+
+ # TODO: Add BibTeX citation
+ # Find for instance the citation on arxiv or on the dataset repo/website
+ _CITATION = """\
+ @misc{Bainbridge_Pounder_Eardley_Baker_2023,
+   title={Quantifying Aphantasia through drawing: Those without visual imagery show deficits in object but not spatial memory},
+   url={osf.io/cahyd},
+   publisher={OSF},
+   author={Bainbridge, Wilma A and Pounder, Zoë and Eardley, Alison and Baker, Chris I},
+   year={2023},
+   month={Sep}
+ }
+ """
+
+ # TODO: Add description of the dataset here
+ # You can copy an official description
+ _DESCRIPTION = """\
+ This dataset comes from the Brain Bridge Lab at the University of Chicago.
+ It is from an online memory drawing experiment with 61 individuals with aphantasia
+ and 52 individuals with normal imagery. In the experiment, participants 1) studied 3 separate
+ scene photographs presented one after the other, 2) drew them from memory,
+ 3) completed a recognition task, 4) copied the images while viewing them, and 5) filled out
+ the VVIQ and OSIQ questionnaires along with demographic questions. The data from the experiment
+ were made available on the OSF page linked above, which was created July 31, 2020 and last
+ updated September 27, 2023.
+ """
+
+ # TODO: Add a link to an official homepage for the dataset here
+ _HOMEPAGE = "https://osf.io/cahyd/"
+
+ # TODO: Add the licence for the dataset here if you can find it
+ _LICENSE = ""
+
+ # TODO: Add link to the official dataset URLs here
+ # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
+ # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
+
+ url = "https://drive.google.com/file/d/1aRhQlKPDk29yYPkx2kPhqaMwec5QZ4JE/view?usp=sharing"
+
+ def _get_drive_url(url):
+     # Turn a Google Drive sharing link into a direct-download URL.
+     base_url = 'https://drive.google.com/uc?id='
+     split_url = url.split('/')
+     return base_url + split_url[5]
+
+ _URL = {"train": _get_drive_url(url)}
+
+ # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
+ class AphantasiaDrawingDataset(datasets.GeneratorBasedBuilder):
+     """Memory and perception drawings, scene photographs, and questionnaire data from aphantasic and control participants."""
+
+     _URL = _URL
+     VERSION = datasets.Version("1.1.0")
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features({
+                 "subject": {
+                     "sub_id": datasets.Value("int32"),
+                     "treatment": datasets.ClassLabel(names=["aphantasia", "control"]),
+                     "demographics": {
+                         "country": datasets.Value("string"),
+                         "age": datasets.Value("float"),
+                         "gender": datasets.Value("string"),
+                         "occupation": datasets.Value("string"),
+                         "art_ability": datasets.Value("int32"),
+                         "art_experience": datasets.Value("string"),
+                         "device": datasets.Value("string"),
+                         "input": datasets.Value("string"),
+                         "difficult": datasets.Value("string"),
+                         "diff_explaination": datasets.Value("string"),
+                         "vviq_score": datasets.Value("int32"),
+                         "osiq_score": datasets.Value("int32")
+                     },
+                     "drawings": {
+                         "kitchen": {
+                             "perception": datasets.Image(decode=True, id=None),
+                             "memory": datasets.Image(decode=True, id=None)
+                         },
+                         "livingroom": {
+                             "perception": datasets.Image(decode=True, id=None),
+                             "memory": datasets.Image(decode=True, id=None)
+                         },
+                         "bedroom": {
+                             "perception": datasets.Image(decode=True, id=None),
+                             "memory": datasets.Image(decode=True, id=None)
+                         }
+                     },
+                     "image": {
+                         "kitchen": datasets.Image(decode=True, id=None),
+                         "livingroom": datasets.Image(decode=True, id=None),
+                         "bedroom": datasets.Image(decode=True, id=None)
+                     }
+                 }
+             }),
+             # No default supervised_keys: examples are nested subject records,
+             # not (input, label) pairs.
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         url_to_download = self._URL
+         downloaded_file = dl_manager.download_and_extract(url_to_download)
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={
+                 "filepath": downloaded_file["train"]
+             })
+         ]
+
+     def _generate_examples(self, filepath):
+         """Yields one example per subject, decoding the base64-encoded drawings and photographs into PIL images."""
+         logging.info("generating examples from = %s", filepath)
+
+         with open(filepath, "r") as subjects_file:
+             subjects_data = json.load(subjects_file)
+             idx = 0
+             for sub in subjects_data:
+                 s = subjects_data[sub]
+                 # Decode the perception and memory drawings for each room;
+                 # missing drawings are stored as empty strings.
+                 for room in s["drawings"].keys():
+                     if s["drawings"][room]["perception"] != "":
+                         img_byt = base64.b64decode(s["drawings"][room]["perception"])
+                         img = Image.open(io.BytesIO(img_byt))
+                         s["drawings"][room]["perception"] = img
+                     else:
+                         s["drawings"][room]["perception"] = None
+
+                     if s["drawings"][room]["memory"] != "":
+                         img_byt = base64.b64decode(s["drawings"][room]["memory"])
+                         img = Image.open(io.BytesIO(img_byt))
+                         s["drawings"][room]["memory"] = img
+                     else:
+                         s["drawings"][room]["memory"] = None
+
+                 # Decode the original scene photographs.
+                 for room in s["image"].keys():
+                     img_byt = base64.b64decode(s["image"][room])
+                     img = Image.open(io.BytesIO(img_byt))
+                     s["image"][room] = img
+                 idx += 1
+
+                 # Key the record by "subject" so it matches the features declared in _info().
+                 yield idx - 1, {
+                     "subject": s
+                 }
+
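For reference, a minimal usage sketch of the loading script added in this commit. The repo id "jmc255/aphantasia_drawing_dataset" and the trust_remote_code flag are assumptions (they are not part of this commit); adjust the id to wherever the script is hosted.

from datasets import load_dataset

# Loading-script repos require trust_remote_code on recent versions of `datasets`
# (assumption: the script lives in the repo id below).
ds = load_dataset("jmc255/aphantasia_drawing_dataset", split="train", trust_remote_code=True)

subject = ds[0]["subject"]
# ClassLabel values come back as integers; map back to the label name.
treatment = ds.features["subject"]["treatment"].int2str(subject["treatment"])  # "aphantasia" or "control"
memory_drawing = subject["drawings"]["kitchen"]["memory"]  # PIL image, or None if the drawing is missing
print(treatment, subject["demographics"]["vviq_score"], memory_drawing)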