""" |
|
This file provides a HuggingFace dataset loader implementation for |
|
the ParaDocs dataset |
|
ParaDocs is a multilingual machine translation dataset that has |
|
labelled document annotations for ParaCrawl, NewsCommentary, and |
|
Europarl data which can be used to create parallel document |
|
datasets for training of context-aware machine translation models. |
|
""" |

import csv
import gzip
import urllib.request

import yaml

import datasets

_CITATION = """\
"""

_DESCRIPTION = """\
ParaDocs is a multilingual machine translation dataset that has
labelled document annotations for ParaCrawl, NewsCommentary, and
Europarl data, which can be used to create parallel document
datasets for training context-aware machine translation models.
"""

_HOMEPAGE = "https://huggingface.co/datasets/jhu-clsp/paradocs"

_LICENSE = "cc-by-sa-4.0"

_URL = "https://huggingface.co/datasets/jhu-clsp/paradocs"

# The list of data files for each variant is maintained in files.yml
# alongside the dataset on the Hub; it is fetched once at import time.
file_list_url = "https://huggingface.co/datasets/jhu-clsp/paradocs/raw/main/files.yml"

with urllib.request.urlopen(file_list_url) as f:
    try:
        fnames = yaml.safe_load(f)
    except yaml.YAMLError as exc:
        # Raise instead of exit(1) so callers importing this module
        # see the underlying error rather than a dead process.
        raise RuntimeError(
            "Error loading the file paths for the dataset splits."
        ) from exc

_DATA_URL = fnames["fnames"]

_VARIANTS = list(_DATA_URL.keys())
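
# For reference, files.yml is assumed to map each variant name to its list of
# data-file URLs, roughly like the sketch below (inferred from the code above,
# not an authoritative schema):
#
#   fnames:
#     <variant-name>:
#       - <url-to-a-shard>.tsv.gz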


class ParaDocs(datasets.GeneratorBasedBuilder):
    # One BuilderConfig per variant listed in files.yml.
    BUILDER_CONFIGS = [datasets.BuilderConfig(name) for name in _VARIANTS]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "src": datasets.Value("string"),
                    "tgt": datasets.Value("string"),
                    "sim_score_one": datasets.Value("float32"),
                    "sim_score_two": datasets.Value("float32"),
                    "collection": datasets.Value("string"),
                    "src_paragraph_id": datasets.Value("string"),
                    "tgt_paragraph_id": datasets.Value("string"),
                    "src_sentence_id": datasets.Value("string"),
                    "tgt_sentence_id": datasets.Value("string"),
                    "src_start_id": datasets.Value("string"),
                    "src_end_id": datasets.Value("string"),
                    "tgt_start_id": datasets.Value("string"),
                    "tgt_end_id": datasets.Value("string"),
                    "src_lid_prob": datasets.Value("float32"),
                    "tgt_lid_prob": datasets.Value("float32"),
                    "duplication_count": datasets.Value("int64"),
                    "src_docid": datasets.Value("string"),
                    "tgt_docid": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download only the files for the selected config (variant).
        data_sources = {self.config.name: _DATA_URL[self.config.name]}

        return [
            datasets.SplitGenerator(
                name="train",
                gen_kwargs={
                    "filepaths": dl_manager.download(data_sources[lang])
                },
            )
            for lang in data_sources
        ]

    def _generate_examples(self, filepaths):
        """Yields examples in raw (text) form by iterating over all data files."""
        id_ = 0
        for filepath in filepaths:
            try:
                with gzip.open(filepath, "rt", encoding="utf-8") as f:
                    # Each shard is a headerless, tab-separated file; values
                    # are read as strings, and `datasets` casts them to the
                    # declared feature types (float32/int64) when encoding.
                    rstream = csv.DictReader(
                        f,
                        delimiter="\t",
                        fieldnames=[
                            "src",
                            "tgt",
                            "sim_score_one",
                            "sim_score_two",
                            "collection",
                            "src_paragraph_id",
                            "tgt_paragraph_id",
                            "src_sentence_id",
                            "tgt_sentence_id",
                            "src_start_id",
                            "src_end_id",
                            "tgt_start_id",
                            "tgt_end_id",
                            "src_lid_prob",
                            "tgt_lid_prob",
                            "duplication_count",
                            "src_docid",
                            "tgt_docid",
                        ],
                        quoting=csv.QUOTE_NONE,
                    )
                    for example in rstream:
                        yield id_, example
                        id_ += 1
            except Exception as e:
                print(e, filepath)
                print("Error reading file:", filepath)
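

# Minimal smoke test (a hedged sketch, not part of the loader): stream a few
# rows of the first variant through this script. Assumes network access and a
# `datasets` version that still supports script-based loaders.
if __name__ == "__main__":
    ds = datasets.load_dataset(__file__, _VARIANTS[0], split="train", streaming=True)
    for i, row in enumerate(ds):
        print(row["src"], "|||", row["tgt"])
        if i >= 2:
            break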