kiamehr74 committed on
Commit
1e733d5
·
1 Parent(s): 3e41160

script name changed

Browse files
Files changed (1) hide show
  1. coarsewsd_20.py +86 -0
coarsewsd_20.py ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# @title Gold cwsd20 temp
import json  # NOTE(review): unused in this file at present — confirm before removing
import datasets

# BibTeX entry for the CoarseWSD-20 paper (Loureiro et al., 2021, arXiv:2008.11608).
_CITATION = """\
@misc{loureiro2021analysis,
      title={Analysis and Evaluation of Language Models for Word Sense Disambiguation},
      author={Daniel Loureiro and Kiamehr Rezaee and Mohammad Taher Pilehvar and Jose Camacho-Collados},
      year={2021},
      eprint={2008.11608},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
"""

# Dataset-card description. Typos fixed: "targetting" -> "targeting",
# "quantitavely" -> "quantitatively".
_DESCRIPTION = """\
The CoarseWSD-20 dataset is a coarse-grained sense disambiguation built from Wikipedia
(nouns only) targeting 2 to 5 senses of 20 ambiguous words. It was specifically designed
to provide an ideal setting for evaluating WSD models (e.g. no senses in test sets missing
from training), both quantitatively and qualitatively.
"""

# HACK: hard-coded Colab-local path to a single word's training file ("apple").
# TODO: replace with proper download URLs resolved through dl_manager in
# _split_generators so the builder works outside this notebook environment.
path = "/content/CoarseWSD-20/data/apple/train.data.txt"
class CWSD20(datasets.GeneratorBasedBuilder):
    """Builder for the CoarseWSD-20 coarse-grained WSD dataset.

    Each data file is tab-separated, one example per line:
    ``<idx>\t<sentence>`` — presumably ``idx`` is the token position of the
    ambiguous target word within the sentence (TODO: confirm against the
    CoarseWSD-20 data format).
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Return the DatasetInfo (features, description, homepage, citation)."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    # Declared int32, so _generate_examples must yield ints.
                    "idx": datasets.Value("int32"),
                    "sentence": datasets.Value("string"),
                }
            ),
            # No canonical (input, target) pair is defined for this dataset.
            supervised_keys=None,
            # Fixed: previously pointed at the BoolQ repository — a
            # copy-paste leftover from the template this script started from.
            homepage="https://github.com/danlou/bert-disambiguation",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Return the split generators.

        Only a TRAIN split is produced, read from the module-level hard-coded
        `path`; `dl_manager` is unused until real download URLs are wired in.
        """
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"ex": path},
            )
        ]

    def _generate_examples(self, ex):
        """Yield (key, example) pairs from the tab-separated file at `ex`.

        Fixes over the original: the trailing newline is stripped before
        splitting (it used to remain attached to the sentence), `idx` is cast
        to int to match the declared int32 feature, and blank/short lines are
        skipped instead of raising IndexError.
        """
        with open(ex, encoding="utf-8") as exf:
            for id_, line in enumerate(exf):
                parts = line.rstrip("\n").split("\t")
                if len(parts) < 2:
                    # Skip blank or malformed lines rather than crashing.
                    continue
                yield id_, {
                    "idx": int(parts[0]),
                    "sentence": parts[1],
                }