"""Dataset for filtered Kvasir-instrument and Hyper-Kvasir with bounding boxes.""" |
|
|
|
import os |
|
import json |
|
from PIL import Image |
|
import datasets |
|
|
|
import os |
|
import json |
|
import pandas as pd |
|
import hashlib |
|
|
|
|
|
cal_mid = lambda bx: [[[float(box['xmin'] + box['xmax']) / 2, float(box['ymin'] + box['ymax']) / 2] for box in bx]] |
|
|
|
|
|
def cal_sha256(file_path): return hashlib.sha256( |
|
open(file_path, 'rb').read()).hexdigest() |
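

# For example (made-up box), cal_mid([{'xmin': 10, 'xmax': 30, 'ymin': 20, 'ymax': 60}])
# returns [[[20.0, 40.0]]]: one outer wrapper list, then one (x, y) midpoint per box.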

# Source data locations on local storage.
hyper_label_img_path = '/global/D1/projects/HOST/Datasets/hyper-kvasir/labeled-images/image-labels.csv'
hyper_df = pd.read_csv(hyper_label_img_path)

hyper_seg_img_path = '/global/D1/projects/HOST/Datasets/hyper-kvasir/segmented-images/bounding-boxes.json'
hyper_seg_img_base_path = '/global/D1/projects/HOST/Datasets/hyper-kvasir/segmented-images/images'

instr_seg_img_path = '/global/D1/projects/HOST/Datasets/kvasir-instrument/bboxes.json'
instr_seg_img_base_path = '/global/D1/projects/HOST/Datasets/kvasir-instrument/images/'

with open(hyper_seg_img_path) as f:
    hyper_seg_imgs = json.load(f)
with open(instr_seg_img_path) as f:
    instr_seg_imgs = json.load(f)
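
# Each JSON file maps an image ID to an entry whose 'bbox' key holds a list of boxes
# with 'xmin', 'ymin', 'xmax', 'ymax' pixel coordinates, roughly (made-up values):
#   {"<image_id>": {"bbox": [{"xmin": 10, "ymin": 20, "xmax": 30, "ymax": 60}]}}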

_CITATION = """\
@article{kvasir,
    title={Kvasir-instrument and Hyper-Kvasir datasets for bounding box annotations},
    author={Sushant Gautam and collaborators},
    year={2024}
}
"""

_DESCRIPTION = """
Filtered Kvasir-instrument and Hyper-Kvasir datasets with bounding boxes for medical imaging tasks.
Each entry contains an image, its SHA-256 checksum, the centre point of every annotated bounding
box, the number of boxes, and label metadata (finding, classification, and organ).
"""

_HOMEPAGE = "https://example.com/kvasir-hyper-bbox"

_LICENSE = "CC BY-NC 4.0"

_URLS = {
    "filtered_data": "https://example.com/kvasir-hyper-bbox-dataset.zip"
}


class KvasirHyperBBox(datasets.GeneratorBasedBuilder):
    """Dataset for Kvasir-instrument and Hyper-Kvasir with bounding boxes."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="bbox_dataset",
            version=VERSION,
            description="Dataset with bounding box annotations.",
        )
    ]

    DEFAULT_CONFIG_NAME = "bbox_dataset"

    def _info(self):
        features = datasets.Features({
            "image_data": datasets.Image(),
            "image_sha256": datasets.Value("string"),
            # Nested sequences of (x, y) bounding-box centre points, as produced by cal_mid.
            "points": datasets.Sequence(datasets.Sequence(datasets.Sequence(datasets.Value("float32")))),
            "count": datasets.Value("int64"),
            "label": datasets.Value("string"),
            "collection_method": datasets.Value("string"),
            "classification": datasets.Value("string"),
            "organ": datasets.Value("string"),
        })
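
        # A generated record then looks roughly like this (hypothetical values):
        #   {"image_data": <image bytes>, "image_sha256": "ab12...",
        #    "points": [[[20.0, 40.0]]], "count": 1, "label": "polyps",
        #    "collection_method": "counting", "classification": "pathological-findings",
        #    "organ": "Lower GI"}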

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            features=features,
        )

    def _split_generators(self, dl_manager):
        # All data is read from fixed local paths, so nothing is downloaded
        # and every example goes into a single TRAIN split.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={},
            )
        ]

    def _generate_examples(self):
        # Hyper-Kvasir: segmented images with bounding boxes, joined with the
        # finding/classification/organ metadata from the image-labels CSV.
        for key, entry in hyper_seg_imgs.items():
            img_path = os.path.join(hyper_seg_img_base_path, f"{key}.jpg")
            hyper_entry = hyper_df.loc[hyper_df['Video file'] == key].iloc[0]
            with open(img_path, 'rb') as img_file:
                image_bytes = img_file.read()
            yield key, {
                "image_data": image_bytes,
                "image_sha256": cal_sha256(img_path),
                "points": cal_mid(entry['bbox']),
                "count": len(entry['bbox']),
                "label": hyper_entry.Finding,
                "collection_method": "counting",
                "classification": hyper_entry.Classification,
                "organ": hyper_entry.Organ,
            }

        # Kvasir-instrument: every image shows an instrument, so the label fields are fixed.
        for key, entry in instr_seg_imgs.items():
            img_path = os.path.join(instr_seg_img_base_path, f"{key}.jpg")
            assert len(entry['bbox']) > 0  # each image should have at least one box
            with open(img_path, 'rb') as img_file:
                image_bytes = img_file.read()
            yield key, {
                "image_data": image_bytes,
                "image_sha256": cal_sha256(img_path),
                "points": cal_mid(entry['bbox']),
                "count": len(entry['bbox']),
                "label": "instrument",
                "collection_method": "counting",
                "classification": "instrument",
                "organ": "instrument",
            }
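

# Example usage (a sketch; the script filename and the need for trust_remote_code
# depend on your local setup and `datasets` version):
#
#   from datasets import load_dataset
#   ds = load_dataset("kvasir_hyper_bbox.py", "bbox_dataset", split="train", trust_remote_code=True)
#   print(ds[0]["label"], ds[0]["count"], ds[0]["points"])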