# coarsewsd_20.py: Hugging Face `datasets` loading script for CoarseWSD-20
import datasets
_CITATION = """\
@misc{loureiro2021analysis,
      title={Analysis and Evaluation of Language Models for Word Sense Disambiguation},
      author={Daniel Loureiro and Kiamehr Rezaee and Mohammad Taher Pilehvar and Jose Camacho-Collados},
      year={2021},
      eprint={2008.11608},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
"""
_DESCRIPTION = """\
The CoarseWSD-20 dataset is a coarse-grained word sense disambiguation dataset built from
Wikipedia (nouns only), targeting 2 to 5 senses of each of 20 ambiguous words. It was
specifically designed to provide an ideal setting for evaluating WSD models (e.g. no senses
in the test sets are missing from training), both quantitatively and qualitatively.
"""
# Hard-coded local path to one word's training split ("apple"); assumes the
# CoarseWSD-20 data has been placed under /content (e.g. in a Colab session).
path = "/content/CoarseWSD-20/apple/train.data.txt"
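# Each line of train.data.txt is tab-separated:
#   <target token index>\t<tokenized sentence>
# e.g. (illustrative values, not taken from the actual file):
#   "0\tapple is a deciduous tree in the rose family"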
class CWSD20(datasets.GeneratorBasedBuilder):
    """CoarseWSD-20: coarse-grained word sense disambiguation of 20 ambiguous nouns."""

    VERSION = datasets.Version("1.0.0")
    def _info(self):
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # Each example carries the target-word token index and the sentence.
            features=datasets.Features(
                {
                    "idx": datasets.Value("int32"),
                    "sentence": datasets.Value("string"),
                    # Leftover fields from the template this script was adapted
                    # from (currently unused):
                    # "word": datasets.Value("string"),
                    # "start1": datasets.Value("int32"),
                    # "start2": datasets.Value("int32"),
                    # "end1": datasets.Value("int32"),
                    # "end2": datasets.Value("int32"),
                    # "label": datasets.Value("int32")
                }
            ),
            # If there's a common (input, target) tuple from the features,
            # specify them here. They'll be used if as_supervised=True in
            # builder.as_dataset. No labels are loaded yet, so none is set.
            supervised_keys=None,
            # Homepage of the dataset for documentation.
            homepage="https://github.com/danlou/bert-disambiguation",
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # dl_manager is a datasets.download.DownloadManager that can be used
        # to download and extract URLs; here the data is instead read from the
        # hard-coded local path defined above.
        dl = path
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"ex": dl},
            )
        ]
    def _generate_examples(self, ex):
        """Yields examples."""
        with open(ex, encoding="utf-8") as exf:
            for id_, exi in enumerate(exf):
                # Each line is "<target token index>\t<tokenized sentence>".
                parts = exi.rstrip("\n").split("\t")
                example = {
                    "idx": int(parts[0]),  # cast: the feature is int32
                    "sentence": parts[1],
                }
                yield id_, example
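

# Minimal usage sketch: build and read the TRAIN split with this builder.
# Assumes the hard-coded `path` above points at an existing train.data.txt.
if __name__ == "__main__":
    builder = CWSD20()
    builder.download_and_prepare()
    train = builder.as_dataset(split="train")
    print(train[0])  # e.g. {'idx': ..., 'sentence': '...'}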