Ioana-Simion committed on
Commit
861b053
·
verified ·
1 Parent(s): 4e326fe

Suggested change in comment

Browse files
Files changed (1) hide show
  1. scripts/generate.py +275 -274
scripts/generate.py CHANGED
@@ -1,275 +1,276 @@
1
- import argparse
2
- import shutil
3
- import pickle
4
- import logging
5
- from omegaconf import OmegaConf
6
- import re
7
- import random
8
- import tarfile
9
- from pydantic import BaseModel
10
- from pathlib import Path
11
-
12
- logging.basicConfig(level=logging.INFO)
13
- logger = logging.getLogger(__name__)
14
-
15
-
16
- def setup_parser():
17
- parser = argparse.ArgumentParser(description="Generate a domain shift dataset")
18
- parser.add_argument("--config", type=str, required=True, help="Path to config file")
19
- parser.add_argument(
20
- "--output_dir", type=str, required=True, help="Path to output directory"
21
- )
22
- parser.add_argument(
23
- "--full_candidate_subsets_path",
24
- type=str,
25
- required=True,
26
- help="Path to full-candidate-subsets.pkl",
27
- )
28
- parser.add_argument(
29
- "--visual_genome_images_dir",
30
- type=str,
31
- required=True,
32
- help="Path to VisualGenome images directory allImages/images",
33
- )
34
- return parser
35
-
36
-
37
- def get_ms_domain_name(obj: str, context: str) -> str:
38
- return f"{obj}({context})"
39
-
40
-
41
- class DataSplits(BaseModel):
42
- train: dict[str, list[str]]
43
- test: dict[str, list[str]]
44
-
45
-
46
- class MetashiftData(BaseModel):
47
- selected_classes: list[str]
48
- spurious_class: str
49
- train_context: str
50
- test_context: str
51
- data_splits: DataSplits
52
-
53
-
54
- class MetashiftFactory(object):
55
- object_context_to_id: dict[str, list[int]]
56
- visual_genome_images_dir: str
57
-
58
- def __init__(
59
- self,
60
- full_candidate_subsets_path: str,
61
- visual_genome_images_dir: str,
62
- ):
63
- """
64
- full_candidate_subsets_path: Path to `full-candidate-subsets.pkl`
65
- visual_genome_images_dir: Path to VisualGenome images directory `allImages/images`
66
- """
67
- with open(full_candidate_subsets_path, "rb") as f:
68
- self.object_context_to_id = pickle.load(f)
69
- self.visual_genome_images_dir = visual_genome_images_dir
70
-
71
- def _get_all_domains_with_object(self, obj: str) -> set[str]:
72
- """Get all domains with given object and any context.
73
- Example:
74
- - _get_all_domains_with_object(table) => [table(dog), table(cat), ...]
75
- """
76
- return {
77
- key
78
- for key in self.object_context_to_id.keys()
79
- if re.match(f"^{obj}\\(.*\\)$", key)
80
- }
81
-
82
- def _get_all_image_ids_with_object(self, obj: str) -> set[str]:
83
- """Get all image ids with given object and any context.
84
- Example:
85
- - get_all_image_ids_with_object(table) => [id~table(dog), id~table(cat), ...]
86
- - where id~domain, means an image sampled from the given domain.
87
- """
88
- domains = self._get_all_domains_with_object(obj)
89
- return {_id for domain in domains for _id in self.object_context_to_id[domain]}
90
-
91
- def _get_image_ids(self, obj: str, context: str | None, exclude_context: str | None = None) -> set[str]:
92
- """Get image ids for the domain `obj(context)`, optionally excluding a specific context."""
93
- if exclude_context is not None:
94
- all_ids = self._get_all_image_ids_with_object(obj)
95
- exclude_ids = self.object_context_to_id[get_ms_domain_name(obj, exclude_context)]
96
- return all_ids - exclude_ids
97
- elif context is not None:
98
- return self.object_context_to_id[get_ms_domain_name(obj, context)]
99
- else:
100
- return self._get_all_image_ids_with_object(obj)
101
-
102
- def _get_class_domains(
103
- self, domains_specification: dict[str, tuple[str, str | None]]
104
- ) -> dict[str, tuple[list[str], list[str]]]:
105
- """Get train and test image ids for the given domains specification."""
106
- domain_ids = dict()
107
- for cls, (train_context, test_context) in domains_specification.items():
108
- if train_context == test_context:
109
- train_ids = self._get_image_ids(cls, context=train_context)
110
- test_ids = self._get_image_ids(cls, context=None, exclude_context=test_context)
111
- domain_ids[cls] = [train_ids, test_ids]
112
- logger.info(
113
- f"{get_ms_domain_name(cls, train_context or '*')}: {len(train_ids)}"
114
- " -> "
115
- f"{get_ms_domain_name(cls, test_context or '*')}: {len(test_ids)}"
116
- )
117
- else:
118
- train_ids = self._get_image_ids(cls, train_context)
119
- test_ids = self._get_image_ids(cls, test_context)
120
- domain_ids[cls] = [train_ids, test_ids]
121
- logger.info(
122
- f"{get_ms_domain_name(cls, train_context or '*')}: {len(train_ids)}"
123
- " -> "
124
- f"{get_ms_domain_name(cls, test_context or '*')}: {len(test_ids)}"
125
- )
126
- return domain_ids
127
-
128
- def _sample_from_domains(
129
- self,
130
- seed: int,
131
- domains: dict[str, tuple[list[str], list[str]]],
132
- num_train_images_per_class: int,
133
- num_test_images_per_class: int,
134
- ) -> dict[str, tuple[list[str], list[str]]]:
135
- """Return sampled domain data from the given full domains."""
136
- # TODO: Do we have to ensure that there's no overlap between classes?
137
- # For example, we could have repeated files in training for different classes.
138
- sampled_domains = dict()
139
- for cls, (train_ids, test_ids) in domains.items():
140
- try:
141
- sampled_train_ids = random.Random(seed).sample(
142
- list(train_ids), num_train_images_per_class
143
- )
144
- test_ids = test_ids - set(sampled_train_ids)
145
- sampled_test_ids = random.Random(seed).sample(
146
- list(test_ids), num_test_images_per_class
147
- )
148
- except ValueError:
149
- logger.error(
150
- f"{cls}: {len(train_ids)} train images, {len(test_ids)} test images"
151
- )
152
- raise Exception("Not enough images for this class")
153
- sampled_domains[cls] = (sampled_train_ids, sampled_test_ids)
154
- return sampled_domains
155
-
156
- def create(
157
- self,
158
- seed: int,
159
- selected_classes: list[str],
160
- spurious_class: str,
161
- train_spurious_context: str,
162
- test_spurious_context: str,
163
- num_train_images_per_class: int,
164
- num_test_images_per_class: int,
165
- ) -> MetashiftData:
166
- """Return (metadata, data) splits for the given data shift."""
167
- domains_specification = {
168
- **{cls: (None, None) for cls in selected_classes},
169
- spurious_class: (
170
- train_spurious_context,
171
- test_spurious_context,
172
- ), # overwrite spurious_class
173
- }
174
- domains = self._get_class_domains(domains_specification)
175
- sampled_domains = self._sample_from_domains(
176
- seed=seed,
177
- domains=domains,
178
- num_train_images_per_class=num_train_images_per_class,
179
- num_test_images_per_class=num_test_images_per_class,
180
- )
181
- data_splits = {"train": dict(), "test": dict()}
182
- for cls, (train_ids, test_ids) in sampled_domains.items():
183
- data_splits["train"][cls] = train_ids
184
- data_splits["test"][cls] = test_ids
185
-
186
- return MetashiftData(
187
- selected_classes=selected_classes,
188
- spurious_class=spurious_class,
189
- train_context=train_spurious_context,
190
- test_context=test_spurious_context,
191
- data_splits=DataSplits(
192
- train=data_splits["train"],
193
- test=data_splits["test"],
194
- ),
195
- )
196
-
197
- def _get_unique_ids_from_info(self, info: dict[str, MetashiftData]):
198
- """Get unique ids from info struct."""
199
- unique_ids = set()
200
- for data in info.values():
201
- for ids in data.data_splits.train.values():
202
- unique_ids.update(ids)
203
- for ids in data.data_splits.test.values():
204
- unique_ids.update(ids)
205
- return unique_ids
206
-
207
- def _replace_ids_with_paths(
208
- self, info: dict[str, MetashiftData], data_path: Path, out_path: Path
209
- ) -> MetashiftData:
210
- """Replace ids with paths."""
211
- new_data = dict()
212
- for dataset_name, data in info.items():
213
- for cls, ids in data.data_splits.train.items():
214
- data.data_splits.train[cls] = [
215
- str(data_path / f"{_id}.jpg") for _id in ids
216
- ]
217
- for cls, ids in data.data_splits.test.items():
218
- data.data_splits.test[cls] = [
219
- str(data_path / f"{_id}.jpg") for _id in ids
220
- ]
221
- new_data[dataset_name] = data
222
- return new_data
223
-
224
- def save_all(self, out_dir: str, info: dict[str, MetashiftData]):
225
- """Save all datasets to the given directory."""
226
- out_path = Path(out_dir)
227
- data_path = out_path / "data"
228
- data_path.mkdir(parents=True, exist_ok=True)
229
-
230
- unique_ids = self._get_unique_ids_from_info(info)
231
- data = self._replace_ids_with_paths(info, data_path, out_path)
232
- # for dataset_name, data in info.items():
233
- # with open(out_path / f"{dataset_name}.json", "w") as f:
234
- # f.write(data.model_dump_json(indent=2))
235
-
236
- # with tarfile.open(data_path / "images.tar.gz", "w:gz") as tar:
237
- # for _id in unique_ids:
238
- # tar.add(
239
- # Path(self.visual_genome_images_dir) / f"{_id}.jpg",
240
- # )
241
-
242
-
243
- def get_dataset_name(task_name: str, experiment_name: str) -> str:
244
- return f"{task_name}_{experiment_name}"
245
-
246
-
247
- def main():
248
- parser = setup_parser()
249
- args = parser.parse_args()
250
- config = OmegaConf.load(args.config)
251
- metashift_factory = MetashiftFactory(
252
- full_candidate_subsets_path=args.full_candidate_subsets_path,
253
- visual_genome_images_dir=args.visual_genome_images_dir,
254
- )
255
- info: dict[str, MetashiftData] = dict()
256
- for task_config in config.tasks:
257
- for experiment_config in task_config.experiments:
258
- data = metashift_factory.create(
259
- seed=task_config.seed,
260
- selected_classes=task_config.selected_classes,
261
- spurious_class=experiment_config.spurious_class,
262
- train_spurious_context=experiment_config.train_context,
263
- test_spurious_context=experiment_config.test_context,
264
- num_test_images_per_class=task_config.num_images_per_class_test,
265
- num_train_images_per_class=task_config.num_images_per_class_train,
266
- )
267
- dataset_name = get_dataset_name(task_config.name, experiment_config.name)
268
- assert dataset_name not in info
269
- info[dataset_name] = data
270
-
271
- metashift_factory.save_all(args.output_dir, info)
272
-
273
-
274
- if __name__ == "__main__":
 
275
  main()
 
1
+ import argparse
2
+ import shutil
3
+ import pickle
4
+ import logging
5
+ from omegaconf import OmegaConf
6
+ import re
7
+ import random
8
+ import tarfile
9
+ from pydantic import BaseModel
10
+ from pathlib import Path
11
+
12
+ logging.basicConfig(level=logging.INFO)
13
+ logger = logging.getLogger(__name__)
14
+
15
+
16
+ def setup_parser():
17
+ parser = argparse.ArgumentParser(description="Generate a domain shift dataset")
18
+ parser.add_argument("--config", type=str, required=True, help="Path to config file")
19
+ parser.add_argument(
20
+ "--output_dir", type=str, required=True, help="Path to output directory"
21
+ )
22
+ parser.add_argument(
23
+ "--full_candidate_subsets_path",
24
+ type=str,
25
+ required=True,
26
+ help="Path to full-candidate-subsets.pkl",
27
+ )
28
+ parser.add_argument(
29
+ "--visual_genome_images_dir",
30
+ type=str,
31
+ required=True,
32
+ help="Path to VisualGenome images directory allImages/images",
33
+ )
34
+ return parser
35
+
36
+
37
+ def get_ms_domain_name(obj: str, context: str) -> str:
38
+ return f"{obj}({context})"
39
+
40
+
41
+ class DataSplits(BaseModel):
42
+ train: dict[str, list[str]]
43
+ test: dict[str, list[str]]
44
+
45
+
46
+ class MetashiftData(BaseModel):
47
+ selected_classes: list[str]
48
+ spurious_class: str
49
+ train_context: str
50
+ test_context: str
51
+ data_splits: DataSplits
52
+
53
+
54
+ class MetashiftFactory(object):
55
+ object_context_to_id: dict[str, list[int]]
56
+ visual_genome_images_dir: str
57
+
58
+ def __init__(
59
+ self,
60
+ full_candidate_subsets_path: str,
61
+ visual_genome_images_dir: str,
62
+ ):
63
+ """
64
+ full_candidate_subsets_path: Path to `full-candidate-subsets.pkl`
65
+ visual_genome_images_dir: Path to VisualGenome images directory `allImages/images`
66
+ """
67
+ with open(full_candidate_subsets_path, "rb") as f:
68
+ self.object_context_to_id = pickle.load(f)
69
+ self.visual_genome_images_dir = visual_genome_images_dir
70
+
71
+ def _get_all_domains_with_object(self, obj: str) -> set[str]:
72
+ """Get all domains with given object and any context.
73
+ Example:
74
+ - _get_all_domains_with_object(table) => [table(dog), table(cat), ...]
75
+ """
76
+ return {
77
+ key
78
+ for key in self.object_context_to_id.keys()
79
+ if re.match(f"^{obj}\\(.*\\)$", key)
80
+ }
81
+
82
+ def _get_all_image_ids_with_object(self, obj: str) -> set[str]:
83
+ """Get all image ids with given object and any context.
84
+ Example:
85
+ - get_all_image_ids_with_object(table) => [id~table(dog), id~table(cat), ...]
86
+ - where id~domain, means an image sampled from the given domain.
87
+ """
88
+ domains = self._get_all_domains_with_object(obj)
89
+ return {_id for domain in domains for _id in self.object_context_to_id[domain]}
90
+
91
+ def _get_image_ids(self, obj: str, context: str | None, exclude_context: str | None = None) -> set[str]:
92
+ """Get image ids for the domain `obj(context)`, optionally excluding a specific context."""
93
+ if exclude_context is not None:
94
+ all_ids = self._get_all_image_ids_with_object(obj)
95
+ exclude_ids = self.object_context_to_id[get_ms_domain_name(obj, exclude_context)]
96
+ return all_ids - exclude_ids
97
+ elif context is not None:
98
+ return self.object_context_to_id[get_ms_domain_name(obj, context)]
99
+ else:
100
+ return self._get_all_image_ids_with_object(obj)
101
+
102
+ def _get_class_domains(
103
+ self, domains_specification: dict[str, tuple[str, str | None]]
104
+ ) -> dict[str, tuple[list[str], list[str]]]:
105
+ """Get train and test image ids for the given domains specification."""
106
+ domain_ids = dict()
107
+ for cls, (train_context, test_context) in domains_specification.items():
108
+ if train_context == test_context:
109
+ # try alternative to remove the need of double context entries
110
+ train_ids = self._get_image_ids(cls, context=train_context)
111
+ test_ids = self._get_image_ids(cls, context=None, exclude_context=test_context)
112
+ domain_ids[cls] = [train_ids, test_ids]
113
+ logger.info(
114
+ f"{get_ms_domain_name(cls, train_context or '*')}: {len(train_ids)}"
115
+ " -> "
116
+ f"{get_ms_domain_name(cls, test_context or '*')}: {len(test_ids)}"
117
+ )
118
+ else:
119
+ train_ids = self._get_image_ids(cls, train_context)
120
+ test_ids = self._get_image_ids(cls, test_context)
121
+ domain_ids[cls] = [train_ids, test_ids]
122
+ logger.info(
123
+ f"{get_ms_domain_name(cls, train_context or '*')}: {len(train_ids)}"
124
+ " -> "
125
+ f"{get_ms_domain_name(cls, test_context or '*')}: {len(test_ids)}"
126
+ )
127
+ return domain_ids
128
+
129
+ def _sample_from_domains(
130
+ self,
131
+ seed: int,
132
+ domains: dict[str, tuple[list[str], list[str]]],
133
+ num_train_images_per_class: int,
134
+ num_test_images_per_class: int,
135
+ ) -> dict[str, tuple[list[str], list[str]]]:
136
+ """Return sampled domain data from the given full domains."""
137
+ # TODO: Do we have to ensure that there's no overlap between classes?
138
+ # For example, we could have repeated files in training for different classes.
139
+ sampled_domains = dict()
140
+ for cls, (train_ids, test_ids) in domains.items():
141
+ try:
142
+ sampled_train_ids = random.Random(seed).sample(
143
+ list(train_ids), num_train_images_per_class
144
+ )
145
+ test_ids = test_ids - set(sampled_train_ids)
146
+ sampled_test_ids = random.Random(seed).sample(
147
+ list(test_ids), num_test_images_per_class
148
+ )
149
+ except ValueError:
150
+ logger.error(
151
+ f"{cls}: {len(train_ids)} train images, {len(test_ids)} test images"
152
+ )
153
+ raise Exception("Not enough images for this class")
154
+ sampled_domains[cls] = (sampled_train_ids, sampled_test_ids)
155
+ return sampled_domains
156
+
157
+ def create(
158
+ self,
159
+ seed: int,
160
+ selected_classes: list[str],
161
+ spurious_class: str,
162
+ train_spurious_context: str,
163
+ test_spurious_context: str,
164
+ num_train_images_per_class: int,
165
+ num_test_images_per_class: int,
166
+ ) -> MetashiftData:
167
+ """Return (metadata, data) splits for the given data shift."""
168
+ domains_specification = {
169
+ **{cls: (None, None) for cls in selected_classes},
170
+ spurious_class: (
171
+ train_spurious_context,
172
+ test_spurious_context,
173
+ ), # overwrite spurious_class
174
+ }
175
+ domains = self._get_class_domains(domains_specification)
176
+ sampled_domains = self._sample_from_domains(
177
+ seed=seed,
178
+ domains=domains,
179
+ num_train_images_per_class=num_train_images_per_class,
180
+ num_test_images_per_class=num_test_images_per_class,
181
+ )
182
+ data_splits = {"train": dict(), "test": dict()}
183
+ for cls, (train_ids, test_ids) in sampled_domains.items():
184
+ data_splits["train"][cls] = train_ids
185
+ data_splits["test"][cls] = test_ids
186
+
187
+ return MetashiftData(
188
+ selected_classes=selected_classes,
189
+ spurious_class=spurious_class,
190
+ train_context=train_spurious_context,
191
+ test_context=test_spurious_context,
192
+ data_splits=DataSplits(
193
+ train=data_splits["train"],
194
+ test=data_splits["test"],
195
+ ),
196
+ )
197
+
198
+ def _get_unique_ids_from_info(self, info: dict[str, MetashiftData]):
199
+ """Get unique ids from info struct."""
200
+ unique_ids = set()
201
+ for data in info.values():
202
+ for ids in data.data_splits.train.values():
203
+ unique_ids.update(ids)
204
+ for ids in data.data_splits.test.values():
205
+ unique_ids.update(ids)
206
+ return unique_ids
207
+
208
+ def _replace_ids_with_paths(
209
+ self, info: dict[str, MetashiftData], data_path: Path, out_path: Path
210
+ ) -> MetashiftData:
211
+ """Replace ids with paths."""
212
+ new_data = dict()
213
+ for dataset_name, data in info.items():
214
+ for cls, ids in data.data_splits.train.items():
215
+ data.data_splits.train[cls] = [
216
+ str(data_path / f"{_id}.jpg") for _id in ids
217
+ ]
218
+ for cls, ids in data.data_splits.test.items():
219
+ data.data_splits.test[cls] = [
220
+ str(data_path / f"{_id}.jpg") for _id in ids
221
+ ]
222
+ new_data[dataset_name] = data
223
+ return new_data
224
+
225
+ def save_all(self, out_dir: str, info: dict[str, MetashiftData]):
226
+ """Save all datasets to the given directory."""
227
+ out_path = Path(out_dir)
228
+ data_path = out_path / "data"
229
+ data_path.mkdir(parents=True, exist_ok=True)
230
+
231
+ unique_ids = self._get_unique_ids_from_info(info)
232
+ data = self._replace_ids_with_paths(info, data_path, out_path)
233
+ # for dataset_name, data in info.items():
234
+ # with open(out_path / f"{dataset_name}.json", "w") as f:
235
+ # f.write(data.model_dump_json(indent=2))
236
+
237
+ # with tarfile.open(data_path / "images.tar.gz", "w:gz") as tar:
238
+ # for _id in unique_ids:
239
+ # tar.add(
240
+ # Path(self.visual_genome_images_dir) / f"{_id}.jpg",
241
+ # )
242
+
243
+
244
+ def get_dataset_name(task_name: str, experiment_name: str) -> str:
245
+ return f"{task_name}_{experiment_name}"
246
+
247
+
248
+ def main():
249
+ parser = setup_parser()
250
+ args = parser.parse_args()
251
+ config = OmegaConf.load(args.config)
252
+ metashift_factory = MetashiftFactory(
253
+ full_candidate_subsets_path=args.full_candidate_subsets_path,
254
+ visual_genome_images_dir=args.visual_genome_images_dir,
255
+ )
256
+ info: dict[str, MetashiftData] = dict()
257
+ for task_config in config.tasks:
258
+ for experiment_config in task_config.experiments:
259
+ data = metashift_factory.create(
260
+ seed=task_config.seed,
261
+ selected_classes=task_config.selected_classes,
262
+ spurious_class=experiment_config.spurious_class,
263
+ train_spurious_context=experiment_config.train_context,
264
+ test_spurious_context=experiment_config.test_context,
265
+ num_test_images_per_class=task_config.num_images_per_class_test,
266
+ num_train_images_per_class=task_config.num_images_per_class_train,
267
+ )
268
+ dataset_name = get_dataset_name(task_config.name, experiment_config.name)
269
+ assert dataset_name not in info
270
+ info[dataset_name] = data
271
+
272
+ metashift_factory.save_all(args.output_dir, info)
273
+
274
+
275
+ if __name__ == "__main__":
276
  main()