Youssef Benhachem committed on
Commit 81100fc · 1 Parent(s): 2292243

Restore Previous Version

Groundtruth-Unicode.xlsx ADDED
Binary file (116 kB).
 
KHATT.py ADDED
@@ -0,0 +1,103 @@
+# coding=utf-8
+# Copyright 2022 the HuggingFace Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datasets
+
+
+_CITATION = """\
+@article{Mahmoud2013KHATT,
+  Author = {Sabri A. Mahmoud and Irfan Ahmad and Wasfi G. Al-Khatib and Mohammad Alshayeb and Mohammad Tanvir Parvez and Volker Märgner and Gernot A. Fink},
+  Title = { {KHATT: An Open Arabic Offline Handwritten Text Database} },
+  Year = {2013},
+  doi = {10.1016/j.patcog.2013.08.009},
+}
+"""
+
+_HOMEPAGE = "https://khatt.ideas2serve.net/KHATTAgreement.php"
+
+_DESCRIPTION = """\
+KHATT (KFUPM Handwritten Arabic TexT) is a database of unconstrained handwritten Arabic text written by 1000 different writers. The database was developed by a research group from KFUPM, Dhahran, Saudi Arabia, headed by Professor Sabri Mahmoud, in collaboration with Professor Fink from TU-Dortmund, Germany, and Dr. Märgner from TU-Braunschweig, Germany.
+"""
+
+_DATA_URL = {
+    "train": [
+        "https://huggingface.co/datasets/benhachem/KHATT/resolve/main/data/train.zip"
+    ],
+    "validation": [
+        "https://huggingface.co/datasets/benhachem/KHATT/resolve/main/data/validation.zip"
+    ],
+
+}
+
+
+class KHATT(datasets.GeneratorBasedBuilder):
+    VERSION = datasets.Version("1.0.0")
+
+    def _info(self):
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=datasets.Features(
+                {
+                    "image": datasets.Image(),
+                    "text": datasets.Value("string"),
+                }
+            ),
+            homepage=_HOMEPAGE,
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        """Returns SplitGenerators."""
+        archives = dl_manager.download(_DATA_URL)
+
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    "archives": [dl_manager.iter_archive(archive) for archive in archives["train"]],
+                    "split": "train",
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                gen_kwargs={
+                    "archives": [dl_manager.iter_archive(archive) for archive in archives["validation"]],
+                    "split": "validation",
+                },
+            ),
+        ]
+    def _generate_examples(self, archives, split):
+        """Yields examples."""
+        idx = 0
+
+        for archive in archives:
+            for path, file in archive:
+                # Text-line image; its .txt transcription follows next in the archive
+                if path.endswith(".tif"):
+                    if split != "test":
+                        img_file = file
+                    else:
+                        text = ""
+
+                elif path.endswith(".txt"):
+
+                    text = file.read()
+                    text = text.decode('utf-8')
+
+                    ex = {"image": {"path": path, "bytes": img_file.read()}, "text": text}
+
+                    yield idx, ex
+
+                    idx += 1
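With this loading script in place, the dataset can be loaded through the datasets library. A minimal usage sketch, assuming the script is hosted in the benhachem/KHATT dataset repo referenced by _DATA_URL (on recent datasets versions, trust_remote_code must be enabled to run a repo-hosted script):

from datasets import load_dataset

# Sketch: assumes KHATT.py above is published in the benhachem/KHATT dataset repo.
# trust_remote_code is needed on recent datasets releases to execute the script.
ds = load_dataset("benhachem/KHATT", trust_remote_code=True)

sample = ds["train"][0]
print(sample["text"])   # Arabic transcription of the text-line image
image = sample["image"]  # PIL.Image decoded from the .tif bytes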
README.md CHANGED
@@ -1,11 +1,13 @@
+
 ---
 configs:
 - config_name: default
   data_files:
   - split: train
-    path: data/train-*
+    path: "data/train.zip"
   - split: validation
-    path: data/validation-*
+    path: "data/validation.zip"
+
 task_categories:
 - image-to-text
 language:
@@ -19,25 +21,6 @@ tags:
 - Textline images
 size_categories:
 - 1K<n<10K
-dataset_info:
-  features:
-  - name: image
-    dtype: image
-  - name: label
-    dtype:
-      class_label:
-        names:
-          '0': Training
-          '1': Validation
-  splits:
-  - name: train
-    num_bytes: 19188705.2
-    num_examples: 1400
-  - name: validation
-    num_bytes: 3038515.0
-    num_examples: 233
-  download_size: 14348228
-  dataset_size: 22227220.2
 ---
 
 # KFUPM Handwritten Arabic TexT (KHATT) database
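The updated front matter points the default config's data_files at the raw zip archives instead of parquet shards. As a quick sanity check, one of those archives can be fetched and inspected directly; a sketch assuming the files remain at data/train.zip in the benhachem/KHATT repo:

import zipfile

from huggingface_hub import hf_hub_download

# Download the archive referenced by the README config (path taken from the diff above).
archive_path = hf_hub_download(
    repo_id="benhachem/KHATT",
    filename="data/train.zip",
    repo_type="dataset",
)

with zipfile.ZipFile(archive_path) as zf:
    # KHATT.py expects each .tif line image to be paired with a .txt transcription.
    for name in zf.namelist()[:10]:
        print(name)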
data/{train-00000-of-00001.parquet → train.zip} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:877cbd1b06b969103861eb48cdfe8bbb3d80f5cfb285c8759b61cc29ae5880b3
-size 12305075
+oid sha256:a894d11e475804e894ef82f52d81afdad717563b9b9188518e825d9f0f5df291
+size 11952092
data/{validation-00000-of-00001.parquet → validation.zip} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d4be5c07dddfa8894eeb4c82f841265573feb6413f881c4489491d027003cdb0
-size 2043153
+oid sha256:6aeaa5162d4397442f1c43af296e3e5d75232c7986ef1f69492e920bb778671b
+size 1971955
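Both renamed entries are Git LFS pointer files; the oid sha256 line records the digest of the actual archive that LFS serves. A small sketch (assuming data/train.zip has already been downloaded locally) to check a file against the pointer shown above:

import hashlib

# Digest copied from the new train.zip LFS pointer above.
EXPECTED_SHA256 = "a894d11e475804e894ef82f52d81afdad717563b9b9188518e825d9f0f5df291"

def sha256_of(path, chunk_size=1 << 20):
    """Hash the file in chunks so large archives never sit fully in memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

assert sha256_of("data/train.zip") == EXPECTED_SHA256, "archive does not match its LFS pointer"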