Convert dataset to Parquet (#2)

- Convert dataset to Parquet (27a86cff7fb1fb8c795447c1a1bc1740cf377133)
- Add de-es data files (99db408c91b0cbcae6e679635bf0317f1e138c55)
- Add de-fr data files (7cd01de5dfff112b57aad16c415e70d54ce32d7d)
- Add de-sv data files (ab170e0ebdff26ad1613e83021dfc75308b3a55d)
- Add en-es data files (13e321b02ba27116002d6389f8ff89510f72a471)
- Add en-fr data files (cfb82fc4492dd49c0c97972b178fb4e354b8d3ab)
- Add en-sv data files (16a38766525b85be04e5b504fe574cb3a352c8f4)
- Add es-fr data files (fcc3b920eb01ab279975ef09bf4fe2a7cf356704)
- Add es-sv data files (bdf6676d9a44000cd224324b64971449b9cf88a0)
- Add fr-sv data files (9941bfca3199c0312b680967be57ba7d8ce7be58)
- Delete loading script (a37dff01933e0b2626e92214ecccfab7d9100577)

Files changed:
- README.md +82 -42
- de-en/train-00000-of-00001.parquet +3 -0
- de-es/train-00000-of-00001.parquet +3 -0
- de-fr/train-00000-of-00001.parquet +3 -0
- de-sv/train-00000-of-00001.parquet +3 -0
- en-es/train-00000-of-00001.parquet +3 -0
- en-fr/train-00000-of-00001.parquet +3 -0
- en-sv/train-00000-of-00001.parquet +3 -0
- es-fr/train-00000-of-00001.parquet +3 -0
- es-sv/train-00000-of-00001.parquet +3 -0
- fr-sv/train-00000-of-00001.parquet +3 -0
- opus_rf.py +0 -139
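
Once this conversion lands, the configs listed above can be loaded straight from the Parquet files, with no loading script involved. A minimal sketch, assuming the `datasets` library is installed; the repository id below is a placeholder, not taken from this commit:

```python
from datasets import load_dataset

# "<namespace>/opus_rf" is a hypothetical repo id -- substitute the actual dataset repository.
ds = load_dataset("<namespace>/opus_rf", "de-en", split="train")

print(ds.num_rows)           # 177 examples expected for de-en, per the README metadata below
print(ds[0]["translation"])  # e.g. {"de": "...", "en": "..."}
```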
README.md CHANGED
@@ -20,8 +20,18 @@ source_datasets:
 task_categories:
 - translation
 task_ids: []
-paperswithcode_id: null
 pretty_name: OpusRf
+config_names:
+- de-en
+- de-es
+- de-fr
+- de-sv
+- en-es
+- en-fr
+- en-sv
+- es-fr
+- es-sv
+- fr-sv
 dataset_info:
 - config_name: de-en
   features:
@@ -35,10 +45,10 @@ dataset_info:
         - en
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 38671
     num_examples: 177
-  download_size:
-  dataset_size:
+  download_size: 25572
+  dataset_size: 38671
 - config_name: de-es
   features:
   - name: id
@@ -51,10 +61,10 @@ dataset_info:
         - es
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 2304
     num_examples: 24
-  download_size:
-  dataset_size:
+  download_size: 3690
+  dataset_size: 2304
 - config_name: de-fr
   features:
   - name: id
@@ -67,10 +77,10 @@ dataset_info:
         - fr
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 41288
     num_examples: 173
-  download_size:
-  dataset_size:
+  download_size: 26724
+  dataset_size: 41288
 - config_name: de-sv
   features:
   - name: id
@@ -83,10 +93,10 @@ dataset_info:
         - sv
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 37402
     num_examples: 178
-  download_size:
-  dataset_size:
+  download_size: 25101
+  dataset_size: 37402
 - config_name: en-es
   features:
   - name: id
@@ -99,10 +109,10 @@ dataset_info:
         - es
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 2588
     num_examples: 25
-  download_size:
-  dataset_size:
+  download_size: 3865
+  dataset_size: 2588
 - config_name: en-fr
   features:
   - name: id
@@ -115,10 +125,10 @@ dataset_info:
         - fr
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 39491
     num_examples: 175
-  download_size:
-  dataset_size:
+  download_size: 25966
+  dataset_size: 39491
 - config_name: en-sv
   features:
   - name: id
@@ -131,10 +141,10 @@ dataset_info:
         - sv
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 35766
     num_examples: 180
-  download_size:
-  dataset_size:
+  download_size: 24513
+  dataset_size: 35766
 - config_name: es-fr
   features:
   - name: id
@@ -147,10 +157,10 @@ dataset_info:
         - fr
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 2507
     num_examples: 21
-  download_size:
-  dataset_size:
+  download_size: 3789
+  dataset_size: 2507
 - config_name: es-sv
   features:
   - name: id
@@ -163,10 +173,10 @@ dataset_info:
         - sv
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 3098
     num_examples: 28
-  download_size:
-  dataset_size:
+  download_size: 4227
+  dataset_size: 3098
 - config_name: fr-sv
   features:
   - name: id
@@ -179,21 +189,51 @@ dataset_info:
         - sv
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 38615
     num_examples: 175
-  download_size:
-  dataset_size:
-configs:
-- de-en
-- de-es
-- de-fr
-- de-sv
-- en-es
-- en-fr
-- en-sv
-- es-fr
-- es-sv
-- fr-sv
+  download_size: 25822
+  dataset_size: 38615
+configs:
+- config_name: de-en
+  data_files:
+  - split: train
+    path: de-en/train-*
+- config_name: de-es
+  data_files:
+  - split: train
+    path: de-es/train-*
+- config_name: de-fr
+  data_files:
+  - split: train
+    path: de-fr/train-*
+- config_name: de-sv
+  data_files:
+  - split: train
+    path: de-sv/train-*
+- config_name: en-es
+  data_files:
+  - split: train
+    path: en-es/train-*
+- config_name: en-fr
+  data_files:
+  - split: train
+    path: en-fr/train-*
+- config_name: en-sv
+  data_files:
+  - split: train
+    path: en-sv/train-*
+- config_name: es-fr
+  data_files:
+  - split: train
+    path: es-fr/train-*
+- config_name: es-sv
+  data_files:
+  - split: train
+    path: es-sv/train-*
+- config_name: fr-sv
+  data_files:
+  - split: train
+    path: fr-sv/train-*
 ---
 
 # Dataset Card for [Dataset Name]
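
The new `configs:` block above maps each config name to a glob over its Parquet shard, so any Parquet reader can also consume the data directly. A minimal sketch using `huggingface_hub` and `pandas`; the repo id is again a placeholder, not taken from this commit:

```python
import pandas as pd
from huggingface_hub import hf_hub_download

# "<namespace>/opus_rf" is a hypothetical repo id -- substitute the actual dataset repository.
path = hf_hub_download(
    repo_id="<namespace>/opus_rf",
    filename="de-en/train-00000-of-00001.parquet",
    repo_type="dataset",
)

df = pd.read_parquet(path)
print(len(df))           # 177, matching num_examples for de-en above
print(list(df.columns))  # ["id", "translation"], matching the features block
```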
de-en/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:486dc31e644104f0e77a8a226b8027cf414b170244d6382ef1f2cdcc4f60aa43
+size 25572

de-es/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0e7ed65cda31725a47ef0e4802b010d1a25cf83ac68efb13a1368ca7e2cf4f9d
+size 3690

de-fr/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:31b40e78b3510ffc453ee0f617a638da1eaa57b4aec9ddba7fe11591536b0e36
+size 26724

de-sv/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7a5d33665aff0984e4e3b62900683acd59ab360a7fec510a1a4474c0e7cf3f76
+size 25101

en-es/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:de1311edea8ad3b71c948671edcb3e3ac6aa97bca7fcd576a9f6ccc26e96af84
+size 3865

en-fr/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:69123d88a3e6f110d5a98d989c05e70a3f7f37a9aef01da046386c7aee5a35d8
+size 25966

en-sv/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5fa0b3a8142f997c06b534dd0dad44a6504a58b7410b57510e6d8efc0d608934
+size 24513

es-fr/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e855aa057aa26776fa029c649f7e32fd794febd79e9278dabdfa872ca3e45d38
+size 3789

es-sv/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fdf3fd4ead2942887b8a71df18eeb2b031d4ea0d9600c855caf9f598657515c4
+size 4227

fr-sv/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7da0b69fcf0a000604e981ef6f47b2cf710e85f3306a37b536fc9748984ebcba
+size 25822
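
Each `.parquet` entry above is a Git LFS pointer: the repository stores only the `oid sha256` and `size` fields, while the actual Parquet bytes live in LFS storage. A minimal sketch for checking a locally downloaded shard against its pointer; the local path is an assumption:

```python
import hashlib
import os

path = "de-en/train-00000-of-00001.parquet"  # hypothetical local path to the downloaded shard

# Hash the file in chunks and compare against the pointer fields.
sha256 = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha256.update(chunk)

print(sha256.hexdigest())     # should equal the pointer's "oid sha256:..." value
print(os.path.getsize(path))  # should equal the pointer's "size" value (25572 for de-en)
```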
opus_rf.py DELETED
@@ -1,139 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-RF is a tiny parallel corpus of the Declarations of the Swedish Government and its translations.
-
-5 languages, 10 bitexts
-total number of files: 11
-total number of tokens: 19.74k
-total number of sentence fragments: 0.86k
-"""
-
-
-import os
-
-import datasets
-
-
-_CITATION = """\
-@InProceedings{TIEDEMANN12.463,
-  author = {J{\"o}rg Tiedemann},
-  title = {Parallel Data, Tools and Interfaces in OPUS},
-  booktitle = {Proceedings of the Eight International Conference on Language Resources and Evaluation (LREC'12)},
-  year = {2012},
-  month = {may},
-  date = {23-25},
-  address = {Istanbul, Turkey},
-  editor = {Nicoletta Calzolari (Conference Chair) and Khalid Choukri and Thierry Declerck and Mehmet Ugur Dogan and Bente Maegaard and Joseph Mariani and Jan Odijk and Stelios Piperidis},
-  publisher = {European Language Resources Association (ELRA)},
-  isbn = {978-2-9517408-7-7},
-  language = {english}
-}
-"""
-
-_DESCRIPTION = """\
-RF is a tiny parallel corpus of the Declarations of the Swedish Government and its translations.
-"""
-
-_HOMEPAGE = "http://opus.nlpl.eu/RF.php"
-
-_VERSION = "1.0.0"
-_BASE_NAME = "RF.{}.{}"
-_BASE_URL = "https://object.pouta.csc.fi/OPUS-RF/v1/moses/{}-{}.txt.zip"
-
-_LANGUAGE_PAIRS = [
-    ("de", "en"),
-    ("de", "es"),
-    ("de", "fr"),
-    ("de", "sv"),
-    ("en", "es"),
-    ("en", "fr"),
-    ("en", "sv"),
-    ("es", "fr"),
-    ("es", "sv"),
-    ("fr", "sv"),
-]
-
-
-class OpusRFTranslationsConfig(datasets.BuilderConfig):
-    def __init__(self, *args, lang1=None, lang2=None, **kwargs):
-        super().__init__(
-            *args,
-            name=f"{lang1}-{lang2}",
-            **kwargs,
-        )
-        self.lang1 = lang1
-        self.lang2 = lang2
-
-
-class OpusRF(datasets.GeneratorBasedBuilder):
-    """RF is a tiny parallel corpus of the Declarations of the Swedish Government and its translations."""
-
-    BUILDER_CONFIGS = [
-        OpusRFTranslationsConfig(
-            lang1=lang1,
-            lang2=lang2,
-            description=f"Translating {lang1} to {lang2} or vice versa",
-            version=datasets.Version(_VERSION),
-        )
-        for lang1, lang2 in _LANGUAGE_PAIRS
-    ]
-    BUILDER_CONFIG_CLASS = OpusRFTranslationsConfig
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "id": datasets.Value("string"),
-                    "translation": datasets.Translation(languages=(self.config.lang1, self.config.lang2)),
-                }
-            ),
-            supervised_keys=None,
-            homepage=_HOMEPAGE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        download_url = _BASE_URL.format(self.config.lang1, self.config.lang2)
-        path = dl_manager.download_and_extract(download_url)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={"datapath": path},
-            )
-        ]
-
-    def _generate_examples(self, datapath):
-        """Yields examples."""
-        l1, l2 = self.config.lang1, self.config.lang2
-        folder = l1 + "-" + l2
-        l1_file = _BASE_NAME.format(folder, l1)
-        l2_file = _BASE_NAME.format(folder, l2)
-        l1_path = os.path.join(datapath, l1_file)
-        l2_path = os.path.join(datapath, l2_file)
-        with open(l1_path, encoding="utf-8") as f1, open(l2_path, encoding="utf-8") as f2:
-            for sentence_counter, (x, y) in enumerate(zip(f1, f2)):
-                x = x.strip()
-                y = y.strip()
-                result = (
-                    sentence_counter,
-                    {
-                        "id": str(sentence_counter),
-                        "translation": {l1: x, l2: y},
-                    },
-                )
-                yield result
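
For reference, the core of the deleted builder (downloading the OPUS-RF moses zip and pairing the two plain-text files line by line) can be reproduced with the standard library alone. A minimal sketch, assuming the zip's member names follow the `RF.<pair>.<lang>` pattern used by `_BASE_NAME` above:

```python
import io
import urllib.request
import zipfile

lang1, lang2 = "de", "en"
pair = f"{lang1}-{lang2}"
url = f"https://object.pouta.csc.fi/OPUS-RF/v1/moses/{pair}.txt.zip"  # same pattern as _BASE_URL

# Fetch the bitext archive into memory.
with urllib.request.urlopen(url) as resp:
    archive = zipfile.ZipFile(io.BytesIO(resp.read()))

# Pair the two moses files line by line, as _generate_examples did.
with archive.open(f"RF.{pair}.{lang1}") as f1, archive.open(f"RF.{pair}.{lang2}") as f2:
    for i, (x, y) in enumerate(zip(f1, f2)):
        example = {
            "id": str(i),
            "translation": {
                lang1: x.decode("utf-8").strip(),
                lang2: y.decode("utf-8").strip(),
            },
        }
        if i < 3:
            print(example)
```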