albertvillanova committed
Commit b75af86
1 parent: fc5fff6

Convert dataset to Parquet (#2)


- Convert dataset to Parquet (cd230d3eb6990eb7f41f51fb5b3f98a8339c4f41)
- Add 'en-pt' config data files (ee2c9e52a3e2d7e39a4e3a111578517bf9150f8b)
- Add 'en-pt-es' config data files (8ed6ec74c8946f7c9e6792bb3c5b0b9d8a386b8c)
- Delete loading script (b1f0cde511275292095af5238c81f80f464ef200)
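
With the loading script gone, the dataset is served straight from the Parquet files added in this commit. A minimal sketch of loading the data pinned to this revision, assuming the canonical `scielo` repo id and a `datasets` version with Parquet-first resolution:

```python
from datasets import load_dataset

# Pin to this commit so the Parquet-backed data files are used.
# The short hash "b75af86" comes from the commit header above;
# the repo id "scielo" is an assumption for illustration.
ds = load_dataset("scielo", "en-es", split="train", revision="b75af86")
print(ds.num_rows)  # 177782 per the README metadata
```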

README.md CHANGED

@@ -18,8 +18,11 @@ source_datasets:
 task_categories:
 - translation
 task_ids: []
-paperswithcode_id: null
 pretty_name: SciELO
+config_names:
+- en-es
+- en-pt
+- en-pt-es
 dataset_info:
 - config_name: en-es
   features:
@@ -31,10 +34,10 @@ dataset_info:
         - es
   splits:
   - name: train
-    num_bytes: 71777213
+    num_bytes: 71777069
     num_examples: 177782
-  download_size: 22965217
-  dataset_size: 71777213
+  download_size: 39938803
+  dataset_size: 71777069
 - config_name: en-pt
   features:
   - name: translation
@@ -45,10 +48,10 @@ dataset_info:
         - pt
   splits:
   - name: train
-    num_bytes: 1032669686
+    num_bytes: 1032667422
     num_examples: 2828917
-  download_size: 322726075
-  dataset_size: 1032669686
+  download_size: 565678928
+  dataset_size: 1032667422
 - config_name: en-pt-es
   features:
   - name: translation
@@ -60,14 +63,23 @@ dataset_info:
         - es
   splits:
   - name: train
-    num_bytes: 147472132
+    num_bytes: 147471820
     num_examples: 255915
-  download_size: 45556562
-  dataset_size: 147472132
-config_names:
-- en-es
-- en-pt
-- en-pt-es
+  download_size: 80329522
+  dataset_size: 147471820
+configs:
+- config_name: en-es
+  data_files:
+  - split: train
+    path: en-es/train-*
+- config_name: en-pt
+  data_files:
+  - split: train
+    path: en-pt/train-*
+- config_name: en-pt-es
+  data_files:
+  - split: train
+    path: en-pt-es/train-*
 ---
 
 # Dataset Card for SciELO
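
The `configs` block added above maps each config name to a `data_files` glob, which is what lets the Hub serve the dataset without a loading script. A hedged sketch of how that resolution looks from the user side (repo id assumed to be `scielo`):

```python
from datasets import get_dataset_config_names, load_dataset

# Config names are now read from the YAML `configs` block instead of
# the BUILDER_CONFIGS list in the deleted scielo.py.
print(get_dataset_config_names("scielo"))  # expected: ['en-es', 'en-pt', 'en-pt-es']

# Loading a config resolves its glob (here en-pt-es/train-*) to Parquet shards.
ds = load_dataset("scielo", "en-pt-es", split="train")
print(ds.features["translation"].languages)  # expected: ['en', 'pt', 'es']
```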
en-es/train-00000-of-00001.parquet ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:143ad22a514c530dc56e50346ecacc18447ac1dfffdd2ea085c4d85475fe677a
+size 39938803

en-pt-es/train-00000-of-00001.parquet ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:38e186aad1c4c0c629330055a7540596085ff811bd637bc00bb605da35b9d898
+size 80329522

en-pt/train-00000-of-00003.parquet ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7275ed1077c3307c878aee1847d44379b9b826a7510a00ae77e7c8ff1f088924
+size 188756689

en-pt/train-00001-of-00003.parquet ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3e286a4d195ca56bf7d811200fe661af17701e0cb385f76c01ecd17702a26bb2
+size 188307047

en-pt/train-00002-of-00003.parquet ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6ec47130a6243d67ee39ec76bd0030a979bf285c438ac19042bdce668b0f4dfa
+size 188615192
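
Each file above is stored as a git-lfs pointer rather than raw bytes: the repository tracks only the spec version, the content's sha256 (`oid`), and its `size`. A sketch of verifying one shard locally, assuming the `scielo` repo id:

```python
import hashlib

import pyarrow.parquet as pq
from huggingface_hub import hf_hub_download

# Download the en-es shard; the Hub resolves the LFS pointer to real bytes.
path = hf_hub_download(
    repo_id="scielo",
    filename="en-es/train-00000-of-00001.parquet",
    repo_type="dataset",
)

# Re-hashing the downloaded file should reproduce the pointer's oid.
with open(path, "rb") as f:
    print(hashlib.sha256(f.read()).hexdigest())
    # expected: 143ad22a514c530dc56e50346ecacc18447ac1dfffdd2ea085c4d85475fe677a

# The single shard holds the whole train split for this config.
print(pq.read_table(path).num_rows)  # 177782 per the README metadata
```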
scielo.py DELETED

@@ -1,121 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Parallel corpus of full-text articles in Portuguese, English and Spanish from SciELO"""
-
-
-import datasets
-
-
-_CITATION = """\
-@inproceedings{soares2018large,
-  title={A Large Parallel Corpus of Full-Text Scientific Articles},
-  author={Soares, Felipe and Moreira, Viviane and Becker, Karin},
-  booktitle={Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC-2018)},
-  year={2018}
-}
-"""
-
-
-_DESCRIPTION = """\
-A parallel corpus of full-text scientific articles collected from Scielo database in the following languages: \
-English, Portuguese and Spanish. The corpus is sentence aligned for all language pairs, \
-as well as trilingual aligned for a small subset of sentences. Alignment was carried out using the Hunalign algorithm.
-"""
-
-
-_HOMEPAGE = "https://sites.google.com/view/felipe-soares/datasets#h.p_92uSCyAjWSRB"
-
-_LANGUAGES = ["en-es", "en-pt", "en-pt-es"]
-
-_URLS = {
-    "en-es": "https://ndownloader.figstatic.com/files/14019287",
-    "en-pt": "https://ndownloader.figstatic.com/files/14019308",
-    "en-pt-es": "https://ndownloader.figstatic.com/files/14019293",
-}
-
-
-class Scielo(datasets.GeneratorBasedBuilder):
-    """Parallel corpus of full-text articles in Portuguese, English and Spanish from SciELO"""
-
-    VERSION = datasets.Version("1.0.0")
-
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="en-es", version=datasets.Version("1.0.0"), description="English-Spanish"),
-        datasets.BuilderConfig(name="en-pt", version=datasets.Version("1.0.0"), description="English-Portuguese"),
-        datasets.BuilderConfig(
-            name="en-pt-es", version=datasets.Version("1.0.0"), description="English-Portuguese-Spanish"
-        ),
-    ]
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {"translation": datasets.features.Translation(languages=tuple(self.config.name.split("-")))}
-            ),
-            supervised_keys=None,
-            homepage=_HOMEPAGE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        archive = dl_manager.download(_URLS[self.config.name])
-        lang_pair = self.config.name.split("-")
-        fname = self.config.name.replace("-", "_")
-
-        if self.config.name == "en-pt-es":
-            return [
-                datasets.SplitGenerator(
-                    name=datasets.Split.TRAIN,
-                    gen_kwargs={
-                        "source_file": f"{fname}.en",
-                        "target_file": f"{fname}.pt",
-                        "target_file_2": f"{fname}.es",
-                        "files": dl_manager.iter_archive(archive),
-                    },
-                ),
-            ]
-
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "source_file": f"{fname}.{lang_pair[0]}",
-                    "target_file": f"{fname}.{lang_pair[1]}",
-                    "files": dl_manager.iter_archive(archive),
-                },
-            ),
-        ]
-
-    def _generate_examples(self, source_file, target_file, files, target_file_2=None):
-        for path, f in files:
-            if path == source_file:
-                source_sentences = f.read().decode("utf-8").split("\n")
-            elif path == target_file:
-                target_sentences = f.read().decode("utf-8").split("\n")
-            elif self.config.name == "en-pt-es" and path == target_file_2:
-                target_sentences_2 = f.read().decode("utf-8").split("\n")
-
-        if self.config.name == "en-pt-es":
-            source, target, target_2 = tuple(self.config.name.split("-"))
-            for idx, (l1, l2, l3) in enumerate(zip(source_sentences, target_sentences, target_sentences_2)):
-                result = {"translation": {source: l1, target: l2, target_2: l3}}
-                yield idx, result
-        else:
-            source, target = tuple(self.config.name.split("-"))
-            for idx, (l1, l2) in enumerate(zip(source_sentences, target_sentences)):
-                result = {"translation": {source: l1, target: l2}}
-                yield idx, result
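
For reference, the deleted builder zipped line-aligned text files into `{"translation": {lang: sentence}}` records; the Parquet shards store those same records, so user-facing iteration is unchanged. A minimal sketch, again assuming the `scielo` repo id:

```python
from datasets import load_dataset

# Stream a few rows; each one matches the Translation feature the deleted
# script built from the aligned en_pt.en / en_pt.pt files.
ds = load_dataset("scielo", "en-pt", split="train", streaming=True)
for idx, example in zip(range(3), ds):
    pair = example["translation"]
    print(idx, pair["en"], "|||", pair["pt"])
```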