"""TextCaps loading script."""

import json
from pathlib import Path

import datasets

_CITATION = """\
@article{sidorov2019textcaps,
    title={TextCaps: a Dataset for Image Captioning with Reading Comprehension},
    author={Sidorov, Oleksii and Hu, Ronghang and Rohrbach, Marcus and Singh, Amanpreet},
    journal={arXiv preprint arXiv:2003.12462},
    year={2020}
}
"""

_DESCRIPTION = """\
TextCaps requires models to read and reason about text in images to generate captions about them. Specifically, models need to incorporate a new modality of text present in the images and reason over it and the visual content of the image to generate image descriptions.
Current state-of-the-art models fail to generate captions for images in TextCaps because they do not have text reading and reasoning capabilities. See the examples in the image to compare ground truth answers and corresponding predictions by a state-of-the-art model.
"""

_HOMEPAGE = "https://textvqa.org/textcaps/"

_LICENSE = "CC BY 4.0"

_URLS = {
    "captions": {
        "train": "https://dl.fbaipublicfiles.com/textvqa/data/textcaps/TextCaps_0.1_train.json",
        "val": "https://dl.fbaipublicfiles.com/textvqa/data/textcaps/TextCaps_0.1_val.json",
        "test": "https://dl.fbaipublicfiles.com/textvqa/data/textcaps/TextCaps_0.1_test.json",
    },
    "images": {
        "train": "https://dl.fbaipublicfiles.com/textvqa/images/train_val_images.zip",
        "val": "https://dl.fbaipublicfiles.com/textvqa/images/train_val_images.zip",
        "test": "https://dl.fbaipublicfiles.com/textvqa/images/test_images.zip",
    },
    "ocr_tokens": {
        "train": "https://dl.fbaipublicfiles.com/textvqa/data/TextVQA_Rosetta_OCR_v0.2_train.json",
        "val": "https://dl.fbaipublicfiles.com/textvqa/data/TextVQA_Rosetta_OCR_v0.2_val.json",
        "test": "https://dl.fbaipublicfiles.com/textvqa/data/TextVQA_Rosetta_OCR_v0.2_test.json",
    },
}

_SUB_FOLDER_OR_FILE_NAME = {
    "images": {
        "train": "train_images",
        "val": "train_images",
        "test": "test_images",
    },
}


class TextCapsDataset(datasets.GeneratorBasedBuilder):
    """Builder for the TextCaps image-captioning dataset."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        features = datasets.Features(
            {
                "ocr_tokens": [datasets.Value("string")],
                "ocr_info": [
                    {
                        "word": datasets.Value("string"),
                        "bounding_box": {
                            "width": datasets.Value("float"),
                            "height": datasets.Value("float"),
                            "rotation": datasets.Value("float"),
                            "roll": datasets.Value("float"),
                            "pitch": datasets.Value("float"),
                            "yaw": datasets.Value("float"),
                            "top_left_x": datasets.Value("float"),
                            "top_left_y": datasets.Value("float"),
                        },
                    }
                ],
                "image": datasets.Image(),
                "image_id": datasets.Value("string"),
                "image_classes": [datasets.Value("string")],
                "flickr_original_url": datasets.Value("string"),
                "flickr_300k_url": datasets.Value("string"),
                "image_width": datasets.Value("int32"),
                "image_height": datasets.Value("int32"),
                "set_name": datasets.Value("string"),
                "image_name": datasets.Value("string"),
                "image_path": datasets.Value("string"),
                "reference_strs": [datasets.Value("string")],
                "reference_tokens": [[datasets.Value("string")]],
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download the caption/OCR annotation files and the image archives.
        data_dir = dl_manager.download_and_extract(_URLS)
        gen_kwargs = {
            split_name: {
                f"{dir_name}_path": Path(data_dir[dir_name][split_name])
                if split_name in data_dir[dir_name]
                else None
                for dir_name in _URLS.keys()
            }
            for split_name in ["train", "val", "test"]
        }

        for split_name in ["train", "val", "test"]:
            gen_kwargs[split_name]["split_name"] = split_name
            # Images for each split live in a sub-folder of the extracted archive;
            # train and val share the same archive and sub-folder.
            gen_kwargs[split_name]["images_path"] = (
                gen_kwargs[split_name]["images_path"]
                / _SUB_FOLDER_OR_FILE_NAME["images"][split_name]
            )
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs=gen_kwargs["train"],
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs=gen_kwargs["val"],
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs=gen_kwargs["test"],
            ),
        ]

    def _generate_examples(
        self, captions_path, ocr_tokens_path, images_path, split_name
    ):
        seen_image_ids = set()
        with open(captions_path, "r", encoding="utf-8") as f:
            captions = json.load(f)["data"]
        with open(ocr_tokens_path, "r", encoding="utf-8") as f:
            ocr_tokens = json.load(f)["data"]

        # Index the OCR annotations by image id so they can be joined with the captions.
        ocr_tokens_per_image_id = {}
        for ocr_item in ocr_tokens:
            ocr_tokens_per_image_id[ocr_item["image_id"]] = ocr_item

        for caption_item in captions:
            # Caption entries may repeat per image; emit each image id only once.
            if caption_item["image_id"] in seen_image_ids:
                continue
            seen_image_ids.add(caption_item["image_id"])
            ocr_item = ocr_tokens_per_image_id[caption_item["image_id"]]
            record = {
                "ocr_tokens": ocr_item["ocr_tokens"],
                "ocr_info": ocr_item["ocr_info"],
                "image_id": caption_item["image_id"],
                "image_classes": caption_item["image_classes"],
                "flickr_original_url": caption_item["flickr_original_url"],
                "flickr_300k_url": caption_item["flickr_300k_url"],
                "image_width": caption_item["image_width"],
                "image_height": caption_item["image_height"],
                "set_name": caption_item["set_name"],
                "image_name": caption_item["image_name"],
                "image_path": caption_item["image_path"],
                "image": str(images_path / f'{caption_item["image_name"]}.jpg'),
            }
            if split_name != "test":
                record["reference_strs"] = caption_item["reference_strs"]
                record["reference_tokens"] = caption_item["reference_tokens"]
            else:
                # Reference captions are not provided for the test split.
                record["reference_strs"] = None
                record["reference_tokens"] = None
            yield caption_item["image_id"], record
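

# Minimal usage sketch: assuming this file is saved locally as e.g. "textcaps.py"
# (a hypothetical path) and the installed `datasets` version supports loading from a
# local script, the builder above can be exercised like this:
#
#     from datasets import load_dataset
#
#     ds = load_dataset("path/to/textcaps.py", split="validation")
#     print(ds[0]["image_id"], ds[0]["reference_strs"])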