Datasets:
Commit · 7d938bd0
Parent(s):
Update files from the datasets library (from 1.2.0)
Release notes: https://github.com/huggingface/datasets/releases/tag/1.2.0
- .gitattributes +27 -0
- README.md +191 -0
- dataset_infos.json +0 -0
- dummy/0.1.0/dummy_data.zip +3 -0
- hebrew_projectbenyehuda.py +141 -0
.gitattributes
ADDED
@@ -0,0 +1,27 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bin.* filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zstandard filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,191 @@
---
annotations_creators:
- expert-generated
language_creators:
- found
languages:
- he
licenses:
- mit
multilinguality:
- monolingual
size_categories:
- 10K<n<100K
source_datasets:
- original
task_categories:
- sequence-modeling
task_ids:
- language-modeling
---

# Dataset Card for Hebrew Projectbenyehuda

## Table of Contents
- [Dataset Description](#dataset-description)
  - [Dataset Summary](#dataset-summary)
  - [Supported Tasks](#supported-tasks-and-leaderboards)
  - [Languages](#languages)
- [Dataset Structure](#dataset-structure)
  - [Data Instances](#data-instances)
  - [Data Fields](#data-fields)
  - [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
  - [Curation Rationale](#curation-rationale)
  - [Source Data](#source-data)
  - [Annotations](#annotations)
  - [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
  - [Social Impact of Dataset](#social-impact-of-dataset)
  - [Discussion of Biases](#discussion-of-biases)
  - [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
  - [Dataset Curators](#dataset-curators)
  - [Licensing Information](#licensing-information)
  - [Citation Information](#citation-information)

## Dataset Description

- **Homepage:** https://github.com/projectbenyehuda/public_domain_dump
- **Repository:** https://github.com/projectbenyehuda/public_domain_dump
- **Paper:**
- **Leaderboard:**
- **Point of Contact:**

### Dataset Summary

This repository contains a dump of thousands of public domain works in Hebrew, from Project Ben-Yehuda, in plaintext UTF-8 files, with and without diacritics (nikkud), and in HTML files. The pseudocatalogue.csv file is a list of titles, authors, genres, and file paths, to help you process the dump.

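For quick inspection outside the `datasets` loader, here is a minimal sketch that fetches the catalogue and lists a few entries; the column names `ID`, `path` and `title` are assumptions based on how the accompanying loading script reads the file:

```python
import csv
import io
import urllib.request

# Sketch only: the URL and column names ("ID", "path", "title") are taken from
# how hebrew_projectbenyehuda.py in this commit reads pseudocatalogue.csv.
CATALOGUE_URL = "https://raw.githubusercontent.com/projectbenyehuda/public_domain_dump/master/pseudocatalogue.csv"

with urllib.request.urlopen(CATALOGUE_URL) as response:
    reader = csv.DictReader(io.TextIOWrapper(response, encoding="utf-8"))
    for i, row in enumerate(reader):
        # Each row describes one work; its plaintext lives under txt/<path>.txt in the dump.
        print(row["ID"], row["path"], row["title"])
        if i >= 4:
            break
```
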
The Releases tab contains a downloadable ZIP archive of the full release. The git repo can be used to track individual file changes, or for incremental updates. In the ZIPs, each format (plaintext, plaintext stripped of diacritics, and HTML) has a ZIP file containing one directory per author, with all the author's works under that directory.

To request changes or improvements to this dump, file an issue against this repository.

All these works are in the public domain, so you are free to make any use of them, and do not need to ask for permission.

If you would like to give credit, please credit "Project Ben-Yehuda volunteers", and include a link to the site. We'd also love to hear about the uses you've made of this dump, as it encourages us to keep producing the dump. E-mail us with a brief description (and links, if/as appropriate) of your re-use, at [email protected].

There are 10078 files, 3181136 lines

Data Annotation:

### Supported Tasks and Leaderboards

[More Information Needed]

### Languages

Hebrew

## Dataset Structure

Sample:

```
'authors' = {str} 'אחד העם'
'genre' = {str} 'מאמרים ומסות'
'id' = {int} 10
'original_language' = {str} ''
'source_edition' = {str} ''
'text' = {str} '\n\n\n\t\n\tחצי-נחמה\n\t\n\n\n\n1\n\nבין כל הצרות שנתחדשו עלינו בעת האחרונה תעשׂה ביחוד רושם מעציב בלב כל איש ישׂראל התחדשות ‘עלילת־הדם’. העלילה הנתעבה הזאת, בכל יָשנה, היתה ותהיה תמיד בעינינו כחדשה, ומימי הבינים ועד עתה תצטין בפעולתה החזקה על רוח עמנו, לא רק במקום המע
'title' = {str} 'חצי-נחמה'
'translators' = {str} ''
'url' = {str} 'https://raw.githubusercontent.com/projectbenyehuda/public_domain_dump/master/txt/p23/m10.txt'
```

### Data Instances

[More Information Needed]

### Data Fields

- `authors`
- `genre`
- `id`
- `original_language`
- `source_edition`
- `text`
- `title`
- `translators`
- `url`

### Data Splits

|        | train |
|--------|-------|
| corpus | 10078 |

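A minimal loading sketch with the `datasets` library (assuming version 1.2.0 or later, where this dataset ships; the first call downloads the catalogue and all text files from GitHub, so it can take a while):

```python
from datasets import load_dataset

# Minimal usage sketch; requires an internet connection on first use.
dataset = load_dataset("hebrew_projectbenyehuda", split="train")

print(dataset)              # features: id, url, title, authors, translators, ...
print(dataset[0]["title"])  # title of the first record
```
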
## Dataset Creation

### Curation Rationale

[More Information Needed]

### Source Data

#### Initial Data Collection and Normalization

#### Who are the source language producers?

[More Information Needed]

Researchers

### Personal and Sensitive Information

[More Information Needed]

## Considerations for Using the Data

### Social Impact of Dataset

[More Information Needed]

### Discussion of Biases

[More Information Needed]

### Other Known Limitations

[More Information Needed]

## Additional Information

### Dataset Curators

[More Information Needed]

### Licensing Information

MIT License

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

### Citation Information

@article{,
  author = {},
  title = {Public domain texts from Project Ben-Yehuda},
  journal = {},
  url = {https://github.com/projectbenyehuda/public_domain_dump},
  year = {2020},
}
dataset_infos.json
ADDED
The diff for this file is too large to render.
dummy/0.1.0/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:023bbedbb838583f164a20410e38bf7ad9fcc0922de6256da971dc8f64908349
size 1578
hebrew_projectbenyehuda.py
ADDED
@@ -0,0 +1,141 @@
"""Public domain texts from Project Ben-Yehuda - a set of books extracted from the Project Ben-Yehuda library"""

from __future__ import absolute_import, division, print_function

import csv

import datasets


_CITATION = """\
@article{,
  author = {},
  title = {Public domain texts from Project Ben-Yehuda},
  journal = {},
  url = {https://github.com/projectbenyehuda/public_domain_dump},
  year = {2020},
}

"""

_DESCRIPTION = """\
This repository contains a dump of thousands of public domain works in Hebrew, from Project Ben-Yehuda, in plaintext UTF-8 files, with and without diacritics (nikkud). The metadata (pseudocatalogue.csv) file is a list of titles, authors, genres, and file paths, to help you process the dump.
All these works are in the public domain, so you are free to make any use of them, and do not need to ask for permission.
There are 10078 files, 3181136 lines
"""

_ASSET_ROOT_URL = "https://raw.githubusercontent.com/projectbenyehuda/public_domain_dump/master/"
_STORAGE_API_ROOT_URL = "https://raw.githubusercontent.com/projectbenyehuda/public_domain_dump/master/txt/"

# Downloading the files one by one from GitHub is too slow.

_METADATA_URL = _ASSET_ROOT_URL + "pseudocatalogue.csv"


class HebrewProjectbenyehuda(datasets.GeneratorBasedBuilder):
    """Project Ben-Yehuda dataset - books as plain text extracted from the Project Ben-Yehuda library."""

    VERSION = datasets.Version("0.1.0")

    def _info(self):
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # datasets.features.FeatureConnectors
            features=datasets.Features(
                {
                    "id": datasets.Value("int32"),
                    "url": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "authors": datasets.Value("string"),
                    "translators": datasets.Value("string"),
                    "original_language": datasets.Value("string"),
                    "genre": datasets.Value("string"),
                    "source_edition": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    # These are the features of your dataset like images, labels ...
                }
            ),
            # If there's a common (input, target) tuple from the features,
            # specify them here. They'll be used if as_supervised=True in
            # builder.as_dataset.
            supervised_keys=None,
            # Homepage of the dataset for documentation
            homepage="https://github.com/projectbenyehuda/public_domain_dump",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""

        # dl_manager is a datasets.download.DownloadManager that can be used to
        # download and extract URLs

        metadata = dl_manager.download({"metadata": _METADATA_URL})

        # Collect the work ids and the raw-text URL for every catalogue entry.
        urls_to_download = dict()
        ids = list()
        with open(metadata["metadata"], encoding="utf-8") as csv_file:
            for row in csv.DictReader(csv_file):
                ids.append(row["ID"])
                urls_to_download[row["ID"]] = _STORAGE_API_ROOT_URL + row["path"].strip("/") + ".txt"

        downloaded_files = dl_manager.download(urls_to_download)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "ids": ids,
                    "metadata_filepath": metadata["metadata"],
                    "filepaths": downloaded_files,
                },
            )
        ]

    def _generate_examples(self, ids, metadata_filepath, filepaths):
        """Yields examples."""

        # Re-read the catalogue with explicit field names and index it by work id.
        with open(metadata_filepath, encoding="utf-8") as f:
            metadata_dict = csv.DictReader(
                f,
                fieldnames=[
                    "_id",
                    "path",
                    "title",
                    "authors",
                    "translators",
                    "original_language",
                    "genre",
                    "source_edition",
                ],
            )
            indexed_metadata = {str(row["_id"]): row for row in metadata_dict}

        for _id in ids:
            data = indexed_metadata[_id]
            filepath = filepaths[_id]

            with open(filepath, encoding="utf-8") as f:
                text = f.read()

            _id = data["_id"]
            title = data["title"]
            url = data["path"].strip("/")
            url = _STORAGE_API_ROOT_URL + url + ".txt"
            authors = data["authors"]
            translators = data["translators"]
            original_language = data["original_language"]
            genre = data["genre"]
            source_edition = data["source_edition"]

            yield _id, {
                "id": _id,
                "title": title,
                "url": url,
                "authors": authors,
                "translators": translators,
                "original_language": original_language,
                "genre": genre,
                "source_edition": source_edition,
                "text": text,
            }
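As a rough development sketch, the script above can also be exercised from a local checkout by pointing `load_dataset` at the file directly, instead of using the library-bundled copy (the path below is hypothetical):

```python
from datasets import load_dataset

# Development sketch: load the dataset from a local copy of the script above.
# "./hebrew_projectbenyehuda.py" is a hypothetical path to wherever the file lives.
dataset = load_dataset("./hebrew_projectbenyehuda.py", split="train")

print(dataset.features)
print(dataset[0]["authors"], "-", dataset[0]["title"])
```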