import json
import datasets
import os
_CITATION = """\
@misc{loureiro2021analysis,
title={Analysis and Evaluation of Language Models for Word Sense Disambiguation},
author={Daniel Loureiro and Kiamehr Rezaee and Mohammad Taher Pilehvar and Jose Camacho-Collados},
year={2021},
eprint={2008.11608},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""

_DESCRIPTION = """\
The CoarseWSD-20 dataset is a coarse-grained word sense disambiguation dataset built from
Wikipedia (nouns only), targeting 2 to 5 senses of 20 ambiguous words. It was specifically
designed to provide an ideal setting for evaluating WSD models (e.g. no senses in the test
sets are missing from training), both quantitatively and qualitatively.
"""

_URL = "https://huggingface.co/datasets/kiamehr74/CoarseWSD-20/raw/main/data"

_WORDS = ["apple", "arm", "bank", "bass",
          "bow", "chair", "club", "crane",
          "deck", "digit", "hood", "java",
          "mole", "pitcher", "pound", "seal",
          "spring", "square", "trunk", "yard"]
class CWSD20(datasets.GeneratorBasedBuilder):
    """CoarseWSD-20: coarse-grained word sense disambiguation, one configuration per ambiguous word."""

    VERSION = datasets.Version("1.0.0")

    # One builder configuration per target word; the config name selects the
    # word-specific data directory under _URL.
    BUILDER_CONFIGS = [datasets.BuilderConfig(name=word, description=_DESCRIPTION) for word in _WORDS]

    def _info(self):
        # The label names differ per word, so the word-specific classes_map.txt is
        # fetched here to build the ClassLabel feature before any split is generated.
        dl_manager = datasets.DownloadManager()
        cmap_path = dl_manager.download_and_extract(
            url_or_urls=os.path.join(_URL, self.config.name, "classes_map.txt")
        )
        with open(cmap_path) as cmap_f:
            cmap = json.load(cmap_f)
        # classes_map.txt maps stringified integer ids to sense names; keep them in id order.
        label_classes = [cmap[str(k)] for k in range(len(cmap))]

        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "idx": datasets.Value("int32"),
                    "sentence": datasets.Value("string"),
                    "label": datasets.features.ClassLabel(names=label_classes),
                }
            ),
            # There is no canonical (input, target) tuple for as_supervised=True.
            supervised_keys=None,
            # Homepage of the dataset for documentation.
            homepage="https://github.com/danlou/bert-disambiguation.git",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # Download the example/label file pairs for the selected word.
        dl = {
            "train_ex": dl_manager.download_and_extract(
                url_or_urls=os.path.join(_URL, self.config.name, "train.data.txt")
            ),
            "train_lb": dl_manager.download_and_extract(
                url_or_urls=os.path.join(_URL, self.config.name, "train.gold.txt")
            ),
            "test_ex": dl_manager.download_and_extract(
                url_or_urls=os.path.join(_URL, self.config.name, "test.data.txt")
            ),
            "test_lb": dl_manager.download_and_extract(
                url_or_urls=os.path.join(_URL, self.config.name, "test.gold.txt")
            ),
        }
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"ex": dl["train_ex"], "lb": dl["train_lb"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"ex": dl["test_ex"], "lb": dl["test_lb"]},
            ),
        ]

    def _generate_examples(self, ex, lb):
        """Yields examples, pairing each sentence in the data file with its gold label."""
        with open(ex, encoding="utf-8") as exf:
            with open(lb, encoding="utf-8") as lbf:
                for id_, (exi, lbi) in enumerate(zip(exf, lbf)):
                    # Each data line is "<token index>\t<sentence>"; the gold file
                    # holds one integer sense id per line.
                    example = {}
                    parts = exi.split("\t")
                    idx = parts[0]
                    sent = parts[1]
                    example["sentence"] = sent.strip()
                    # Cast to int so the value matches the declared int32 feature.
                    example["idx"] = int(idx)
                    example["label"] = int(lbi)
                    yield id_, example
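

if __name__ == "__main__":
    # Minimal usage sketch (not part of the loading script proper): load one of the
    # word configurations and print a training example. This assumes network access
    # to the Hugging Face Hub and that this script backs the kiamehr74/CoarseWSD-20
    # repository referenced in _URL above.
    dset = datasets.load_dataset("kiamehr74/CoarseWSD-20", "bank")
    print(dset["train"][0])  # e.g. {"idx": ..., "sentence": "...", "label": ...}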