shunk031 committed
Commit 3b2b0ce · unverified · 1 Parent(s): ab84bbe

add flag for decoding RLE segmentation map (#3)

Files changed (2)
  1. COCOA.py +74 -28
  2. tests/COCOA_test.py +8 -1
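The new flag is a builder-config option, so it can be passed straight through ds.load_dataset as a keyword argument. A minimal usage sketch (the script path and data_dir below are placeholders borrowed from the tests, not part of this commit):

    import datasets as ds

    dataset = ds.load_dataset(
        path="COCOA.py",                  # placeholder: path to this dataset script
        name="COCO",                      # or "BSDS"
        data_dir="annotations.tar.gz",    # placeholder: the annotations archive
        decode_rle=True,                  # new flag added here; defaults to False
    )

    # decode_rle=True  -> "segmentation", "visible_mask" and "invisible_mask" are exposed
    #                     as ds.Image() features (decoded binary masks).
    # decode_rle=False -> "segmentation" stays a flat list of polygon coordinates and the
    #                     masks stay RLE dicts with "counts" and "size".
    print(dataset["train"].features)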
COCOA.py CHANGED
@@ -3,13 +3,12 @@ import logging
 import os
 from collections import defaultdict
 from dataclasses import asdict, dataclass
-from typing import Any, Dict, List, Literal, Optional, Tuple, Type, Union
+from typing import Any, Dict, List, Literal, Optional, Tuple, Type, TypedDict, Union
 
 import datasets as ds
 import numpy as np
 from PIL import Image
 from PIL.Image import Image as PilImage
-from pycocotools import mask as cocomask
 from tqdm.auto import tqdm
 
 logger = logging.getLogger(__name__)
@@ -146,21 +145,28 @@ class BsDsImageData(ImageData):
         )
 
 
+class RunLengthEncoding(TypedDict):
+    counts: str
+    size: Tuple[int, int]
+
+
 @dataclass
 class RegionAnnotationData(object):
-    segmentation: np.ndarray
+    segmentation: Union[List[float], np.ndarray]
     name: str
     area: float
     is_stuff: bool
     occlude_rate: float
     order: int
-    visible_mask: Optional[np.ndarray] = None
-    invisible_mask: Optional[np.ndarray] = None
+    visible_mask: Optional[Union[np.ndarray, RunLengthEncoding]] = None
+    invisible_mask: Optional[Union[np.ndarray, RunLengthEncoding]] = None
 
     @classmethod
     def rle_segmentation_to_binary_mask(
         cls, segmentation, height: int, width: int
     ) -> np.ndarray:
+        from pycocotools import mask as cocomask
+
         if isinstance(segmentation, list):
             rles = cocomask.frPyObjects([segmentation], h=height, w=width)
             rle = cocomask.merge(rles)
@@ -180,6 +186,8 @@ class RegionAnnotationData(object):
 
     @classmethod
     def get_visible_binary_mask(cls, rle_visible_mask=None) -> Optional[np.ndarray]:
+        from pycocotools import mask as cocomask
+
         if rle_visible_mask is None:
             return None
         return cocomask.decode(rle_visible_mask)
@@ -199,21 +207,28 @@ class RegionAnnotationData(object):
 
     @classmethod
     def from_dict(
-        cls, json_dict: JsonDict, image_data: ImageData
+        cls,
+        json_dict: JsonDict,
+        image_data: ImageData,
+        decode_rle: bool,
     ) -> "RegionAnnotationData":
-        segmentation = json_dict["segmentation"]
-
-        segmentation_mask = cls.rle_segmentation_to_mask(
-            segmentation=segmentation,
-            height=image_data.height,
-            width=image_data.width,
-        )
-        visible_mask = cls.get_visible_mask(
-            rle_visible_mask=json_dict.get("visible_mask")
-        )
-        invisible_mask = cls.get_invisible_mask(
-            rle_invisible_mask=json_dict.get("invisible_mask")
-        )
+        if decode_rle:
+            segmentation_mask = cls.rle_segmentation_to_mask(
+                segmentation=json_dict["segmentation"],
+                height=image_data.height,
+                width=image_data.width,
+            )
+            visible_mask = cls.get_visible_mask(
+                rle_visible_mask=json_dict.get("visible_mask")
+            )
+            invisible_mask = cls.get_invisible_mask(
+                rle_invisible_mask=json_dict.get("invisible_mask")
+            )
+        else:
+            segmentation_mask = json_dict["segmentation"]
+            visible_mask = json_dict.get("visible_mask")
+            invisible_mask = json_dict.get("invisible_mask")
+
         return cls(
             segmentation=segmentation_mask,
             visible_mask=visible_mask,
@@ -237,13 +252,15 @@ class CocoaAnnotationData(object):
 
     @classmethod
     def from_dict(
-        cls, json_dict: JsonDict, images: Dict[ImageId, ImageData]
+        cls, json_dict: JsonDict, images: Dict[ImageId, ImageData], decode_rle: bool
    ) -> "CocoaAnnotationData":
         image_id = json_dict["image_id"]
 
         regions = [
             RegionAnnotationData.from_dict(
-                json_dict=region_dict, image_data=images[image_id]
+                json_dict=region_dict,
+                image_data=images[image_id],
+                decode_rle=decode_rle,
             )
             for region_dict in json_dict["regions"]
         ]
@@ -282,23 +299,32 @@ def _load_images_data(
 def _load_cocoa_data(
     ann_dicts: List[JsonDict],
     images: Dict[ImageId, ImageData],
+    decode_rle: bool,
     tqdm_desc: str = "Load COCOA annotations",
-):
+) -> Dict[ImageId, List[CocoaAnnotationData]]:
     annotations = defaultdict(list)
     ann_dicts = sorted(ann_dicts, key=lambda d: d["image_id"])
 
     for ann_dict in tqdm(ann_dicts, desc=tqdm_desc):
-        cocoa_data = CocoaAnnotationData.from_dict(ann_dict, images=images)
+        cocoa_data = CocoaAnnotationData.from_dict(
+            ann_dict, images=images, decode_rle=decode_rle
+        )
         annotations[cocoa_data.image_id].append(cocoa_data)
 
     return annotations
 
 
+@dataclass
+class CocoaConfig(ds.BuilderConfig):
+    decode_rle: bool = False
+
+
 class CocoaDataset(ds.GeneratorBasedBuilder):
     VERSION = ds.Version("1.0.0")
+    BUILDER_CONFIG_CLASS = CocoaConfig
     BUILDER_CONFIGS = [
-        ds.BuilderConfig(name="COCO", version=VERSION),
-        ds.BuilderConfig(name="BSDS", version=VERSION),
+        CocoaConfig(name="COCO", version=VERSION, decode_rle=False),
+        CocoaConfig(name="BSDS", version=VERSION, decode_rle=False),
     ]
 
     def load_amodal_annotation(self, ann_json_path: str) -> JsonDict:
@@ -336,20 +362,35 @@ class CocoaDataset(ds.GeneratorBasedBuilder):
         else:
             raise ValueError(f"Invalid dataset name: {self.config.name}")
 
+        if self.config.decode_rle:  # type: ignore
+            segmentation_feature = ds.Image()
+            visible_mask_feature = ds.Image()
+            invisible_mask_feature = ds.Image()
+        else:
+            segmentation_feature = ds.Sequence(ds.Value("float32"))
+            visible_mask_feature = {
+                "counts": ds.Value("string"),
+                "size": ds.Sequence(ds.Value("int32")),
+            }
+            invisible_mask_feature = {
+                "counts": ds.Value("string"),
+                "size": ds.Sequence(ds.Value("int32")),
+            }
+
         features_dict["annotations"] = ds.Sequence(
             {
                 "author": ds.Value("string"),
                 "url": ds.Value("string"),
                 "regions": ds.Sequence(
                     {
-                        "segmentation": ds.Image(),
+                        "segmentation": segmentation_feature,
                         "name": ds.Value("string"),
                         "area": ds.Value("float32"),
                         "is_stuff": ds.Value("bool"),
                         "occlude_rate": ds.Value("float32"),
                         "order": ds.Value("int32"),
-                        "visible_mask": ds.Image(),
-                        "invisible_mask": ds.Image(),
+                        "visible_mask": visible_mask_feature,
+                        "invisible_mask": invisible_mask_feature,
                     }
                 ),
                 "image_id": ds.Value("int64"),
@@ -500,13 +541,18 @@ class CocoaDataset(ds.GeneratorBasedBuilder):
             image_dicts=ann_json["images"],
             dataset_name=self.config.name,
         )
-        annotations = _load_cocoa_data(ann_dicts=ann_json["annotations"], images=images)
+        annotations = _load_cocoa_data(
+            ann_dicts=ann_json["annotations"],
+            images=images,
+            decode_rle=self.config.decode_rle,  # type: ignore
+        )
 
         for idx, image_id in enumerate(images.keys()):
            image_data = images[image_id]
            image_anns = annotations[image_id]
 
            if len(image_anns) < 1:
+                # The original COCO and BSDS datasets may not have amodal annotations.
                continue
 
            image = _load_image(
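For reference, the decoding that decode_rle=True turns on is the standard pycocotools recipe already used in rle_segmentation_to_binary_mask: polygon segmentations are converted to RLE with frPyObjects, merged, and decoded into a binary mask, while masks that are already RLE-encoded go straight through decode. A self-contained sketch with made-up values:

    import numpy as np
    from pycocotools import mask as cocomask

    height, width = 4, 6                                 # made-up image size
    polygon = [1.0, 1.0, 4.0, 1.0, 4.0, 3.0, 1.0, 3.0]   # made-up x,y polygon for one region

    # Polygon -> RLE -> binary mask, mirroring rle_segmentation_to_binary_mask above.
    rles = cocomask.frPyObjects([polygon], h=height, w=width)
    rle = cocomask.merge(rles)
    binary_mask = cocomask.decode(rle)                   # uint8 ndarray of shape (height, width)

    # A mask that is already RLE-encoded (e.g. a visible_mask entry) decodes directly.
    encoded = cocomask.encode(np.asfortranarray(binary_mask))
    assert np.array_equal(cocomask.decode(encoded), binary_mask)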
tests/COCOA_test.py CHANGED
@@ -18,6 +18,10 @@ def data_dir() -> str:
     return "annotations.tar.gz"
 
 
+@pytest.mark.parametrize(
+    argnames="decode_rle,",
+    argvalues=(False, True),
+)
 @pytest.mark.parametrize(
     argnames=(
         "dataset_name",
@@ -37,8 +41,11 @@ def test_load_dataset(
     expected_num_train: int,
     expected_num_validation: int,
     expected_num_test: int,
+    decode_rle: bool,
 ):
-    dataset = ds.load_dataset(path=dataset_path, name=dataset_name, data_dir=data_dir)
+    dataset = ds.load_dataset(
+        path=dataset_path, name=dataset_name, data_dir=data_dir, decode_rle=decode_rle
+    )
 
     assert dataset["train"].num_rows == expected_num_train  # type: ignore
     assert dataset["validation"].num_rows == expected_num_validation  # type: ignore