import datasets
import pandas as pd

_DESCRIPTION = """Training data for SimCSE: the nli_for_simcse corpus of
(sent0, sent1, hard_neg) triplets hosted at princeton-nlp/datasets-for-simcse."""

_CITATION = ''

GITHUB_HOME = ''


class DatasetsForSimCSEConfig(datasets.BuilderConfig):
    """BuilderConfig for datasets-for-simcse."""
    def __init__(self, features, data_url, citation, url, label_classes=(0, 1), **kwargs):
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.features = features
        self.label_classes = label_classes
        self.data_url = data_url
        self.citation = citation
        self.url = url


class DatasetsForSimCSE(datasets.GeneratorBasedBuilder):
    """NLI-derived triplets (sent0, sent1, hard_neg) for training SimCSE."""
    BUILDER_CONFIGS = [
        DatasetsForSimCSEConfig(
            name="nli_for_simcse",
            description=_DESCRIPTION,
            features=datasets.Features({
                "sent0": datasets.Value("string"),
                "sent1": datasets.Value("string"),
                "hard_neg": datasets.Value("string"),
            }),
            data_url='https://huggingface.co/datasets/princeton-nlp/datasets-for-simcse/resolve/main/nli_for_simcse.csv',
            citation=_CITATION,
            url=GITHUB_HOME,
        )
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=self.config.description,
            features=self.config.features,
            homepage=self.config.url,
            citation=self.config.citation,
        )

    def _split_generators(self, dl_manager):
        # Download the single CSV from the Hub; this corpus provides only a train split.
        filepath = dl_manager.download(self.config.data_url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": filepath},
            )
        ]

    def _generate_examples(self, filepath):
"""This function returns the examples in the raw (text) form.""" |
|
df = pd.read_csv(filepath, sep=',') |
|
for idx, row in df.iterrows(): |
|
context = {'sent0': row['sent0'], 'sent1': row['sent1'], 'hard_neg': row['hard_neg']} |
|
yield idx, context |
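

# Usage sketch (illustrative, not part of the original loader): with a `datasets`
# release that still supports local loading scripts, the builder above can be loaded
# through the standard load_dataset entry point. The local filename
# "datasets_for_simcse.py" is an assumption made for this example.
if __name__ == "__main__":
    ds = datasets.load_dataset("datasets_for_simcse.py", "nli_for_simcse", split="train")
    print(ds[0])  # each example has 'sent0', 'sent1', and 'hard_neg' fields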