fginter committed on
Commit
4297ad9
·
1 Parent(s): abb0eec

loading script

Browse files
Files changed (1) hide show
  1. xlsum-fi.py +118 -0
xlsum-fi.py ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """XL-Sum-FI Finnish abstractive summarization dataset based on machine translation of the XL-Sum dataset"""
2
+
3
+
4
+ import json
5
+ import os
6
+
7
+ import datasets
8
+
9
+
10
+ _CITATION = """\
11
+
12
+ Please cite the article and also acknowledge Filip Ginter / TurkuNLP for the machine translated version
13
+
14
+ @inproceedings{hasan-etal-2021-xl,
15
+ title = "{XL}-Sum: Large-Scale Multilingual Abstractive Summarization for 44 Languages",
16
+ author = "Hasan, Tahmid and
17
+ Bhattacharjee, Abhik and
18
+ Islam, Md. Saiful and
19
+ Mubasshir, Kazi and
20
+ Li, Yuan-Fang and
21
+ Kang, Yong-Bin and
22
+ Rahman, M. Sohel and
23
+ Shahriyar, Rifat",
24
+ booktitle = "Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021",
25
+ month = aug,
26
+ year = "2021",
27
+ address = "Online",
28
+ publisher = "Association for Computational Linguistics",
29
+ url = "https://aclanthology.org/2021.findings-acl.413",
30
+ pages = "4693--4703",
31
+ }
32
+ """
33
+
34
+
35
+ _DESCRIPTION = """\
36
+ This dataset is a DeepL -based machine translation of a part of the English section of the XLSum dataset:[https://github.com/csebuetnlp/xl-sum](https://github.com/csebuetnlp/xl-sum) In the present version, only examples where the full version is at most 10x the summary in length are included. We might translate more later.
37
+ """
38
+
39
+ _HOMEPAGE = "https://github.com/TurkuNLP/xlsum-fi"
40
+
41
+ _LICENSE = "Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License (CC BY-NC-SA 4.0)"
42
+
43
+ _URL = "https://huggingface.co/datasets/TurkuNLP/xlsum-fi/resolve/main/data/{}_XLSum-fi_v{}.tar.bz2"
44
+
45
+ _LANGUAGES = [
46
+ "finnish",
47
+ ]
48
+
49
+
50
class Xlsum(datasets.GeneratorBasedBuilder):
    """Builder for XL-Sum-FI, the machine-translated Finnish XL-Sum dataset."""

    VERSION = datasets.Version("2.0.0")

    # One configuration per language listed in _LANGUAGES (currently Finnish only).
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name=lang, version=datasets.Version("2.0.0"))
        for lang in _LANGUAGES
    ]

    def _info(self):
        """Return dataset metadata: record schema, citation, license, homepage."""
        record_schema = datasets.Features(
            {
                "id": datasets.Value("string"),
                "url": datasets.Value("string"),
                "title": datasets.Value("string"),
                "summary": datasets.Value("string"),
                "text": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=record_schema,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
            version=self.VERSION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        lang = str(self.config.name)
        # version_str "2.0.0" -> "2.0": archive file names carry major.minor only.
        url = _URL.format(lang, self.VERSION.version_str[:-2])
        data_dir = dl_manager.download_and_extract(url)

        # (split, jsonl filename suffix) pairs, in the order the splits are exposed.
        split_files = [
            (datasets.Split.TRAIN, "_train.jsonl"),
            (datasets.Split.TEST, "_test.jsonl"),
            (datasets.Split.VALIDATION, "_val.jsonl"),
        ]
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, lang + suffix),
                },
            )
            for split_name, suffix in split_files
        ]

    def _generate_examples(self, filepath):
        """Yields examples as (key, example) tuples."""
        with open(filepath, encoding="utf-8") as handle:
            for line_no, raw_line in enumerate(handle):
                record = json.loads(raw_line)
                yield line_no, {
                    "id": record["id"],
                    "url": record["url"],
                    "title": record["title"],
                    "summary": record["summary"],
                    "text": record["text"],
                }