"""XL-Sum-FI Finnish abstractive summarization dataset based on machine translation of the XL-Sum dataset"""


import json
import os

import datasets
|
_CITATION = """\
Please cite the article below, and also acknowledge Filip Ginter / TurkuNLP for the machine-translated version.

@inproceedings{hasan-etal-2021-xl,
    title = "{XL}-Sum: Large-Scale Multilingual Abstractive Summarization for 44 Languages",
    author = "Hasan, Tahmid and
      Bhattacharjee, Abhik and
      Islam, Md. Saiful and
      Mubasshir, Kazi and
      Li, Yuan-Fang and
      Kang, Yong-Bin and
      Rahman, M. Sohel and
      Shahriyar, Rifat",
    booktitle = "Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021",
    month = aug,
    year = "2021",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.findings-acl.413",
    pages = "4693--4703",
}
"""
|
_DESCRIPTION = """\
This dataset is a DeepL-based machine translation of a part of the English section of the XL-Sum dataset: https://github.com/csebuetnlp/xl-sum
In the present version, only examples whose full text is at most 10x the length of the summary are included. We may translate more later.
"""
|
_HOMEPAGE = "https://github.com/TurkuNLP/xlsum-fi"

_LICENSE = "Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License (CC BY-NC-SA 4.0)"

_URL = "https://huggingface.co/datasets/TurkuNLP/xlsum-fi/resolve/main/data/{}_XLSum-fi_v{}.tar.bz2"
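# The two placeholders are filled in _split_generators with the config name and
# the two-component version, e.g. "finnish" and "2.0" resolve to
# .../finnish_XLSum-fi_v2.0.tar.bz2.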
|
_LANGUAGES = [
    "finnish",
]
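# Each entry in _LANGUAGES becomes one builder configuration below; currently
# only the Finnish machine translation is provided.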
|
|
class Xlsum(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("2.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=lang,
            version=datasets.Version("2.0.0"),
        )
        for lang in _LANGUAGES
    ]
|
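    # The feature schema mirrors the original XL-Sum loader: one news article
    # per example, with its id, url, title, reference summary and full text.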
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "url": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "summary": datasets.Value("string"),
                    "text": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
            version=self.VERSION,
        )
|
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        lang = str(self.config.name)
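        # VERSION.version_str is "2.0.0"; dropping the trailing ".0" yields the
        # two-component version ("2.0") used in the archive file names.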
        url = _URL.format(lang, self.VERSION.version_str[:-2])

        data_dir = dl_manager.download_and_extract(url)
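        # The extracted archive contains {lang}_train.jsonl, {lang}_val.jsonl
        # and {lang}_test.jsonl; map each file to its corresponding split.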
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, lang + "_train.jsonl"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, lang + "_test.jsonl"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, lang + "_val.jsonl"),
                },
            ),
        ]
|
    def _generate_examples(self, filepath):
        """Yields examples as (key, example) tuples."""
        with open(filepath, encoding="utf-8") as f:
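            # One JSON object per line; the running line index serves as the
            # unique example key expected by the datasets library.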
            for idx_, row in enumerate(f):
                data = json.loads(row)
                yield idx_, {
                    "id": data["id"],
                    "url": data["url"],
                    "title": data["title"],
                    "summary": data["summary"],
                    "text": data["text"],
                }
|
|
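# A minimal usage sketch, not part of the loader itself: assuming this script is
# hosted as "TurkuNLP/xlsum-fi" on the Hugging Face Hub (as _HOMEPAGE and _URL
# suggest), the dataset can be loaded with the standard `datasets` API:
#
#     from datasets import load_dataset
#
#     dataset = load_dataset("TurkuNLP/xlsum-fi", "finnish")
#     print(dataset["train"][0]["summary"])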