florianbussmann committed
Commit d51655a · 1 Parent(s): 6f014eb

Add dataset loading script for revised FUNSD dataset

Files changed (1)
  1. funsd.py +157 -0
funsd.py ADDED
@@ -0,0 +1,157 @@
# coding=utf-8
import json
import os

import datasets

from PIL import Image
import numpy as np

logger = datasets.logging.get_logger(__name__)


_CITATION = """\
@article{vu2020revising,
  title={Revising FUNSD dataset for key-value detection in document images},
  author={Vu, Hieu M and Nguyen, Diep Thi-Ngoc},
  journal={arXiv preprint arXiv:2010.05322},
  year={2020}
}
"""
_DESCRIPTION = """\
FUNSD is one of the limited publicly available datasets for information extraction from document images.
The information in the FUNSD dataset is defined by text areas of four categories ("key", "value", "header", "other", and "background")
and connectivity between areas as key-value relations. Inspecting FUNSD, we found several inconsistency in labeling, which impeded its
applicability to the key-value extraction problem. In this report, we described some labeling issues in FUNSD and the revision we made
to the dataset.
"""

_URL = """\
https://drive.google.com/drive/folders/1HjJyoKqAh-pvtg3eQAmrbfzPccQZ48rz
"""


def load_image(image_path):
    image = Image.open(image_path).convert("RGB")
    w, h = image.size
    return image, (w, h)


def normalize_bbox(bbox, size):
    return [
        int(1000 * bbox[0] / size[0]),
        int(1000 * bbox[1] / size[1]),
        int(1000 * bbox[2] / size[0]),
        int(1000 * bbox[3] / size[1]),
    ]


class FunsdConfig(datasets.BuilderConfig):
    """BuilderConfig for FUNSD"""

    def __init__(self, **kwargs):
        """BuilderConfig for FUNSD.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(FunsdConfig, self).__init__(**kwargs)


class Funsd(datasets.GeneratorBasedBuilder):
    """FUNSD: Form Understanding in Noisy Scanned Documents."""

    BUILDER_CONFIGS = [
        FunsdConfig(
            name="funsd_vu2020revising",
            version=datasets.Version("1.0.0"),
            description="Revised FUNSD dataset",
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "words": datasets.Sequence(datasets.Value("string")),
                    "bboxes": datasets.Sequence(
                        datasets.Sequence(datasets.Value("int64"))
                    ),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "B-HEADER",
                                "I-HEADER",
                                "B-QUESTION",
                                "I-QUESTION",
                                "B-ANSWER",
                                "I-ANSWER",
                            ]
                        )
                    ),
                    "image_path": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage="https://guillaumejaume.github.io/FUNSD/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        downloaded_file = dl_manager.download_and_extract(
            "https://drive.google.com/uc?export=download&id=1wdJJQgRIb1c404SJnX1dyBSi7U2mVduI"
        )
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": f"{downloaded_file}/FUNSD/training_data/"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": f"{downloaded_file}/FUNSD/testing_data/"},
            ),
        ]

    def _generate_examples(self, filepath):
        logger.info("⏳ Generating examples from = %s", filepath)
        ann_dir = os.path.join(filepath, "adjusted_annotations")
        img_dir = os.path.join(filepath, "images")
        for guid, file in enumerate(sorted(os.listdir(ann_dir))):
            words = []
            bboxes = []
            ner_tags = []
            file_path = os.path.join(ann_dir, file)
            with open(file_path, "r", encoding="utf8") as f:
                data = json.load(f)
            image_path = os.path.join(img_dir, file)
            image_path = image_path.replace("json", "png")
            _, size = load_image(image_path)
            for item in data["form"]:
                words_example, label = item["words"], item["label"]
                words_example = [w for w in words_example if w["text"].strip() != ""]
                if len(words_example) == 0:
                    continue
                if label == "other":
                    for w in words_example:
                        words.append(w["text"])
                        ner_tags.append("O")
                        bboxes.append(normalize_bbox(w["box"], size))
                else:
                    words.append(words_example[0]["text"])
                    ner_tags.append("B-" + label.upper())
                    bboxes.append(normalize_bbox(words_example[0]["box"], size))
                    for w in words_example[1:]:
                        words.append(w["text"])
                        ner_tags.append("I-" + label.upper())
                        bboxes.append(normalize_bbox(w["box"], size))
            yield guid, {
                "id": str(guid),
                "words": words,
                "bboxes": bboxes,
                "ner_tags": ner_tags,
                "image_path": image_path,
            }
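For context, a minimal usage sketch of the script added above. It assumes the file is available locally as funsd.py and a `datasets` release that still supports dataset loading scripts (recent releases additionally require trust_remote_code=True); the Hub repository id is not stated in this commit, so the local path is used instead.

    from datasets import load_dataset

    # Load the revised FUNSD splits via the script added in this commit.
    # `name` matches the single BuilderConfig defined above.
    dataset = load_dataset("funsd.py", name="funsd_vu2020revising")

    train = dataset["train"]
    print(train.features["ner_tags"].feature.names)
    # ['O', 'B-HEADER', 'I-HEADER', 'B-QUESTION', 'I-QUESTION', 'B-ANSWER', 'I-ANSWER']

    example = train[0]
    print(example["id"], len(example["words"]), example["bboxes"][0])

Note that bounding boxes come back already rescaled by normalize_bbox to a 0-1000 grid relative to page width and height, the coordinate convention LayoutLM-style models expect, while image_path points at the corresponding page image so callers can reload pixels on demand.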