import json

import datasets

_VERSION = "0.0.1"

_URL = "https://fcheck.fel.cvut.cz/downloads/NLI/anli_v1.0_cs_google_translate/R3/"

_URLS = {
    "train": _URL + "train.jsonl",
    "validation": _URL + "dev.jsonl",
    "test": _URL + "test.jsonl",
}

_DESCRIPTION = """\
Anli_cs is a Czech translation (via Google Translate) of round R3 of the Adversarial NLI dataset.
"""

_CITATION = """\
todo
"""

# Map the single-letter ANLI labels onto FEVER-style label names.
_LABEL_CONVERSION = {
    "n": "NOT ENOUGH INFO",
    "e": "SUPPORTS",
    "c": "REFUTES",
}


class AnliCs(datasets.GeneratorBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "label": datasets.ClassLabel(names=["REFUTES", "NOT ENOUGH INFO", "SUPPORTS"]),
                    "evidence": datasets.Value("string"),
                    "claim": datasets.Value("string"),
                }
            ),
            # No default supervised_keys (as we have to pass both the claim
            # and the evidence as input).
            supervised_keys=None,
            version=datasets.Version(_VERSION),
            homepage="https://fcheck.fel.cvut.cz/dataset/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        downloaded_files = dl_manager.download_and_extract(_URLS)

        return [
            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["validation"]}),
            datasets.SplitGenerator(datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        """Yields the examples in raw (text) form, one per JSONL line."""
        with open(filepath, encoding="utf-8") as f:
            for key, line in enumerate(f):
                datapoint = json.loads(line)
                yield key, {
                    "id": datapoint["uid"],
                    "evidence": datapoint["context"],
                    "claim": datapoint["hypothesis"],
                    "label": _LABEL_CONVERSION[datapoint["label"]],
                }
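
# Usage sketch (an assumption, not part of the script itself): if this file is
# saved as e.g. "anli_cs.py", the builder above can be exercised locally with
# the standard `datasets.load_dataset` API, which downloads the three JSONL
# splits and runs `_generate_examples` over each:
#
#     from datasets import load_dataset
#
#     ds = load_dataset("anli_cs.py")          # hypothetical local path
#     print(ds)                                # train / validation / test splits
#     print(ds["train"][0])                    # {"id": ..., "evidence": ..., "claim": ..., "label": ...}
#
# Note that "label" is a ClassLabel, so `ds["train"][0]["label"]` is an integer
# index into ["REFUTES", "NOT ENOUGH INFO", "SUPPORTS"], not the string itself.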