kiamehr74 committed
Commit af0cb82 · 1 Parent(s): 1e733d5

script name changed

Files changed (2):
  1. CoarseWSD-20.py +22 -52
  2. coarsewsd_20.py +0 -86
CoarseWSD-20.py CHANGED
@@ -1,5 +1,5 @@
 
-# @title Gold cwsd20
+# @title Gold cwsd20 temp
 import json
 import datasets
 
@@ -22,25 +22,15 @@ to provide an ideal setting for evaluating WSD models (e.g. no senses in test se
 from training), both quantitavely and qualitatively.
 """
 
-path = "data/apple/train.data.txt"
+path = "/content/CoarseWSD-20/data/apple/train.data.txt"
 
-_PATHS = {
-    "train_examples": "/content/train/train.data.txt",
-    "train_labels": "/content/train/train.gold.txt",
-    "dev_examples": "/content/dev/dev.data.txt",
-    "dev_labels": "/content/dev/dev.gold.txt",
-    "test_examples": "/content/test/test.data.txt",
-    "test_labels": "/content/test/test.gold.txt",
-}
 
-def normalize_text(text):
-    return text.replace(' .', '.').replace(' ,', ',').replace(" '", "'").replace(" ?", "?").replace(" !", "!")
 
 class CWSD20(datasets.GeneratorBasedBuilder):
     """TODO(WiCTSV): Short description of my dataset."""
 
     # TODO(WiCTSV): Set up version.
-    VERSION = datasets.Version("3.5.8")
+    VERSION = datasets.Version("1.0.0")
 
     def _info(self):
         # TODO(WiCTSV): Specifies the datasets.DatasetInfo object
@@ -50,15 +40,15 @@ class CWSD20(datasets.GeneratorBasedBuilder):
             # datasets.features.FeatureConnectors
             features=datasets.Features(
                 {
-                    "sentence1": datasets.Value("string"),
-                    "sentence2": datasets.Value("string"),
                     "idx": datasets.Value("int32"),
-                    "word": datasets.Value("string"),
-                    "start1": datasets.Value("int32"),
-                    "start2": datasets.Value("int32"),
-                    "end1": datasets.Value("int32"),
-                    "end2": datasets.Value("int32"),
-                    "label": datasets.Value("int32")
+                    "sentence": datasets.Value("string"),
+                    # "idx": datasets.Value("int32"),
+                    # "word": datasets.Value("string"),
+                    # "start1": datasets.Value("int32"),
+                    # "start2": datasets.Value("int32"),
+                    # "end1": datasets.Value("int32"),
+                    # "end2": datasets.Value("int32"),
+                    # "label": datasets.Value("int32")
                 }
            ),
             # If there's a common (input, target) tuple from the features,
@@ -76,41 +66,21 @@ class CWSD20(datasets.GeneratorBasedBuilder):
         # dl_manager is a datasets.download.DownloadManager that can be used to
         # download and extract URLs
         # urls_to_download = _URLS
-        dl = _PATHS
+        dl = path
 
-        return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN,
-                                    gen_kwargs={"ex": dl["train_examples"], "lb": dl["train_labels"]}),
-            datasets.SplitGenerator(name=datasets.Split.TEST,
-                                    gen_kwargs={"ex": dl["test_examples"], "lb": dl["test_labels"]}),
-            datasets.SplitGenerator(name=datasets.Split.VALIDATION,
-                                    gen_kwargs={"ex": dl["dev_examples"], "lb": dl["dev_labels"]}),
-        ]
+        return [datasets.SplitGenerator(name=datasets.Split.TRAIN,
+                                        gen_kwargs={"ex": dl})]
 
-    def _generate_examples(self, ex, lb):
+    def _generate_examples(self, ex):
         """Yields examples."""
         with open(ex, encoding="utf-8") as exf:
-            with open(lb, encoding="utf-8") as lbf:
-                for id_, (exi, lbi) in enumerate(zip(exf, lbf)):
+            for id_, exi in enumerate(exf):
                 example = {}
                 # 'word', 'sentence1', 'sentence2', 'start1', 'start2', 'end1', 'end2', 'idx', 'label'
-
-                word, _, positions, sent1, sent2 = exi.strip().split('\t')
-                tokens1 = sent1.split(' ')
-                tokens2 = sent2.split(' ')
-                pos1, _, pos2 = positions.partition('-')
-                pos1, pos2 = int(pos1), int(pos2)
-                sentence1b = normalize_text(' '.join(tokens1[:pos1]))
-                sentence1a = normalize_text(' '.join(tokens1[pos1+1:]))
-                example["sentence1"] = normalize_text(' '.join(tokens1))
-                sentence2b = normalize_text(' '.join(tokens2[:pos2]))
-                sentence2a = normalize_text(' '.join(tokens2[pos2+1:]))
-                example["sentence2"] = normalize_text(' '.join(tokens2))
-                example["start1"] = 0 if pos1 == 0 else len(sentence1b) + 1
-                example["start2"] = 0 if pos2 == 0 else len(sentence2b) + 1
-                example["end1"] = example["start1"] + len(tokens1[pos1])
-                example["end2"] = example["start2"] + len(tokens2[pos2])
-                example["idx"] = id_
-                example["word"] = word
-                example["label"] = 1 if lbi.strip() == 'T' else 0
+                parts = exi.split("\t")
+                idx = parts[0]
+                sent = parts[1]
+                example["sentence"] = sent
+                example["idx"] = idx
+
                 yield id_, example
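
For reference, the rewritten _generate_examples assumes each line of train.data.txt carries the target-token index and the tokenized sentence, separated by a tab. A minimal standalone sketch of that parse, with an invented sample line (note the committed code keeps idx as a string even though the "idx" feature is declared int32, and keeps the trailing newline on the sentence):

    # Standalone sketch of the new parsing logic; the sample line is hypothetical.
    sample_line = "2\tI ate an apple while reading .\n"

    parts = sample_line.split("\t")
    idx = int(parts[0])               # the committed script leaves this as a str
    sentence = parts[1].rstrip("\n")  # the committed script keeps the newline
    print(idx, sentence)              # -> 2 I ate an apple while reading .
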
coarsewsd_20.py DELETED
@@ -1,86 +0,0 @@
-
-# @title Gold cwsd20 temp
-import json
-import datasets
-
-
-_CITATION = """\
-@misc{loureiro2021analysis,
-      title={Analysis and Evaluation of Language Models for Word Sense Disambiguation},
-      author={Daniel Loureiro and Kiamehr Rezaee and Mohammad Taher Pilehvar and Jose Camacho-Collados},
-      year={2021},
-      eprint={2008.11608},
-      archivePrefix={arXiv},
-      primaryClass={cs.CL}
-}
-"""
-
-_DESCRIPTION = """\
-The CoarseWSD-20 dataset is a coarse-grained sense disambiguation built from Wikipedia
-(nouns only) targetting 2 to 5 senses of 20 ambiguous words. It was specifically designed
-to provide an ideal setting for evaluating WSD models (e.g. no senses in test sets missing
-from training), both quantitavely and qualitatively.
-"""
-
-path = "/content/CoarseWSD-20/data/apple/train.data.txt"
-
-
-
-class CWSD20(datasets.GeneratorBasedBuilder):
-    """TODO(WiCTSV): Short description of my dataset."""
-
-    # TODO(WiCTSV): Set up version.
-    VERSION = datasets.Version("1.0.0")
-
-    def _info(self):
-        # TODO(WiCTSV): Specifies the datasets.DatasetInfo object
-        return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
-            description=_DESCRIPTION,
-            # datasets.features.FeatureConnectors
-            features=datasets.Features(
-                {
-                    "idx": datasets.Value("int32"),
-                    "sentence": datasets.Value("string"),
-                    # "idx": datasets.Value("int32"),
-                    # "word": datasets.Value("string"),
-                    # "start1": datasets.Value("int32"),
-                    # "start2": datasets.Value("int32"),
-                    # "end1": datasets.Value("int32"),
-                    # "end2": datasets.Value("int32"),
-                    # "label": datasets.Value("int32")
-                }
-            ),
-            # If there's a common (input, target) tuple from the features,
-            # specify them here. They'll be used if as_supervised=True in
-            # builder.as_dataset.
-            supervised_keys=None,
-            # Homepage of the dataset for documentation
-            homepage="https://github.com/google-research-datasets/boolean-questions",
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        # TODO(WiCTSV): Downloads the data and defines the splits
-        # dl_manager is a datasets.download.DownloadManager that can be used to
-        # download and extract URLs
-        # urls_to_download = _URLS
-        dl = path
-
-        return [datasets.SplitGenerator(name=datasets.Split.TRAIN,
-                                        gen_kwargs={"ex": dl})]
-
-    def _generate_examples(self, ex):
-        """Yields examples."""
-        with open(ex, encoding="utf-8") as exf:
-            for id_, exi in enumerate(exf):
-                example = {}
-                # 'word', 'sentence1', 'sentence2', 'start1', 'start2', 'end1', 'end2', 'idx', 'label'
-                parts = exi.split("\t")
-                idx = parts[0]
-                sent = parts[1]
-                example["sentence"] = sent
-                example["idx"] = idx
-
-                yield id_, example
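
With the duplicate coarsewsd_20.py removed, only the renamed CoarseWSD-20.py builder remains. A hedged usage sketch, assuming the script sits in the current directory and the hard-coded /content/CoarseWSD-20/... data path exists; as noted above, the string idx may still need an explicit cast to satisfy the declared int32 feature:

    # Usage sketch (not part of this commit): load the renamed builder script.
    from datasets import load_dataset

    # Assumes /content/CoarseWSD-20/data/apple/train.data.txt exists, as hard-coded in the script.
    ds = load_dataset("./CoarseWSD-20.py", split="train")
    print(ds[0])  # expected fields: "idx" and "sentence"
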