File size: 3,185 Bytes
80b0c72
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86

# @title Gold cwsd20 temp
import json
import datasets


_CITATION = """\
@misc{loureiro2021analysis,
      title={Analysis and Evaluation of Language Models for Word Sense Disambiguation}, 
      author={Daniel Loureiro and Kiamehr Rezaee and Mohammad Taher Pilehvar and Jose Camacho-Collados},
      year={2021},
      eprint={2008.11608},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
"""

_DESCRIPTION = """\
The CoarseWSD-20 dataset is a coarse-grained sense disambiguation built from Wikipedia 
(nouns only) targetting 2 to 5 senses of 20 ambiguous words. It was specifically designed 
to provide an ideal setting for evaluating WSD models (e.g. no senses in test sets missing 
from training), both quantitavely and qualitatively.
"""

# NOTE(review): hard-coded Colab-local path to a single word's train split
# ("apple") — parameterize per word/split (or use dl_manager) before
# publishing this builder.
path = "/content/CoarseWSD-20/apple/train.data.txt"



class CWSD20(datasets.GeneratorBasedBuilder):
    """Builder for the CoarseWSD-20 word sense disambiguation dataset.

    Reads a tab-separated data file where each line holds the target-word
    position followed by the sentence, and yields one example per line
    with fields ``idx`` (int) and ``sentence`` (str).
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Return the dataset metadata (features, description, citation)."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    # Token position of the ambiguous target word.
                    "idx": datasets.Value("int32"),
                    "sentence": datasets.Value("string"),
                }
            ),
            # No canonical (input, target) pair: gold labels live in
            # separate files that this builder does not load.
            supervised_keys=None,
            # BUG FIX: the homepage previously pointed at an unrelated
            # dataset (boolean-questions). CoarseWSD-20 is distributed
            # from the repository of Loureiro et al. (2021).
            homepage="https://github.com/danlou/bert-disambiguation",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Define the single TRAIN split over the module-level data path.

        ``dl_manager`` is unused because the data file is local; nothing
        is downloaded.
        """
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"ex": path},
            )
        ]

    def _generate_examples(self, ex):
        """Yield ``(key, example)`` pairs from the tab-separated file.

        Each non-empty line has the form ``<idx>TAB<sentence>``.
        """
        with open(ex, encoding="utf-8") as exf:
            for id_, line in enumerate(exf):
                # Robustness: skip blank lines (e.g. a trailing newline
                # at EOF) instead of raising IndexError.
                if not line.strip():
                    continue
                parts = line.rstrip("\n").split("\t")
                yield id_, {
                    # BUG FIX: cast to int to match the declared int32
                    # feature (the original yielded a raw string).
                    "idx": int(parts[0]),
                    # BUG FIX: newline stripped above — the original
                    # left a trailing "\n" on every sentence.
                    "sentence": parts[1],
                }