import json

import datasets

_CITATION = """\
@misc{loureiro2021analysis,
    title={Analysis and Evaluation of Language Models for Word Sense Disambiguation},
    author={Daniel Loureiro and Kiamehr Rezaee and Mohammad Taher Pilehvar and Jose Camacho-Collados},
    year={2021},
    eprint={2008.11608},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
"""

_DESCRIPTION = """\
The CoarseWSD-20 dataset is a coarse-grained sense disambiguation dataset built
from Wikipedia (nouns only), targeting 2 to 5 senses of 20 ambiguous words. It
was specifically designed to provide an ideal setting for evaluating WSD models
(e.g. no senses in test sets missing from training), both quantitatively and
qualitatively.
"""

# Base URL of the per-word data directories on the Hugging Face Hub.
_URL = "https://huggingface.co/datasets/kiamehr74/CoarseWSD-20/raw/main/data"

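# A sketch of the remote layout this script expects under _URL (one directory
# per word); the file names below are exactly the ones requested further down:
#
#   <word>/classes_map.txt   JSON object: {"0": "<sense name>", "1": ...}
#   <word>/train.data.txt    one "<idx>\t<sentence>" pair per line
#   <word>/train.gold.txt    one class index per line, parallel to train.data.txt
#   <word>/test.data.txt
#   <word>/test.gold.txt
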
# The 20 ambiguous target words; each word becomes one builder configuration.
_WORDS = ["apple", "arm", "bank", "bass",
          "bow", "chair", "club", "crane",
          "deck", "digit", "hood", "java",
          "mole", "pitcher", "pound", "seal",
          "spring", "square", "trunk", "yard"]


class CWSD20(datasets.GeneratorBasedBuilder):
    """CoarseWSD-20: coarse-grained word sense disambiguation for 20 ambiguous nouns."""

    VERSION = datasets.Version("1.0.0")

    # One configuration per target word; the config name selects the word.
    BUILDER_CONFIGS = [datasets.BuilderConfig(name=word, description=_DESCRIPTION)
                       for word in _WORDS]

    def _info(self):
        # The sense inventory differs per word, so the class map must be
        # fetched eagerly to populate the ClassLabel feature: _info has no
        # access to the download manager later passed to _split_generators,
        # hence the standalone instance. URLs are joined with "/" explicitly
        # (os.path.join would be platform-dependent).
        dl_manager = datasets.DownloadManager()
        cmap_path = dl_manager.download_and_extract(
            url_or_urls=f"{_URL}/{self.config.name}/classes_map.txt"
        )
        # classes_map.txt holds a JSON object mapping stringified class
        # indices ("0", "1", ...) to human-readable sense names.
        with open(cmap_path, encoding="utf-8") as cmap_f:
            cmap = json.load(cmap_f)
        label_classes = [cmap[str(k)] for k in range(len(cmap))]

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "idx": datasets.Value("int32"),
                    "sentence": datasets.Value("string"),
                    "label": datasets.features.ClassLabel(names=label_classes),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/danlou/bert-disambiguation.git",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # Each split pairs a sentence file (*.data.txt) with a parallel gold
        # label file (*.gold.txt).
        dl = {
            "train_ex": dl_manager.download_and_extract(
                url_or_urls=f"{_URL}/{self.config.name}/train.data.txt"
            ),
            "train_lb": dl_manager.download_and_extract(
                url_or_urls=f"{_URL}/{self.config.name}/train.gold.txt"
            ),
            "test_ex": dl_manager.download_and_extract(
                url_or_urls=f"{_URL}/{self.config.name}/test.data.txt"
            ),
            "test_lb": dl_manager.download_and_extract(
                url_or_urls=f"{_URL}/{self.config.name}/test.gold.txt"
            ),
        }
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"ex": dl["train_ex"], "lb": dl["train_lb"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"ex": dl["test_ex"], "lb": dl["test_lb"]},
            ),
        ]

    def _generate_examples(self, ex, lb):
        """Yields examples."""
        # The data and gold files are parallel: line i of the gold file holds
        # the class index for sentence i of the data file.
        with open(ex, encoding="utf-8") as exf, open(lb, encoding="utf-8") as lbf:
            for id_, (exi, lbi) in enumerate(zip(exf, lbf)):
                # Each data line is "<idx>\t<sentence>"; split only on the
                # first tab so tabs inside the sentence are preserved.
                idx, sent = exi.split("\t", 1)
                yield id_, {
                    "idx": int(idx),  # cast: the "idx" feature is int32
                    "sentence": sent.strip(),
                    "label": int(lbi),  # int() tolerates the trailing newline
                }
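

# Usage sketch (illustrative, not part of the loading script): assuming this
# script is consumed through the "kiamehr74/CoarseWSD-20" repo on the Hugging
# Face Hub, one configuration can be loaded by passing a word from _WORDS:
#
#   from datasets import load_dataset
#
#   ds = load_dataset("kiamehr74/CoarseWSD-20", "apple")
#   print(ds["train"][0])  # {"idx": ..., "sentence": "...", "label": ...}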