Junyeob Kim committed
Commit aa7201b · 1 Parent(s): 4e4447c

First version of the your_dataset_name dataset.

.gitattributes CHANGED
@@ -14,3 +14,27 @@
  *.pb filter=lfs diff=lfs merge=lfs -text
  *.pt filter=lfs diff=lfs merge=lfs -text
  *.pth filter=lfs diff=lfs merge=lfs -text
+ FewGLUE_32dev/boolq/dev32.jsonl filter=lfs diff=lfs merge=lfs -text
+ FewGLUE_32dev/boolq/train.jsonl filter=lfs diff=lfs merge=lfs -text
+ FewGLUE_32dev/boolq/val.jsonl filter=lfs diff=lfs merge=lfs -text
+ FewGLUE_32dev/cb/val.jsonl filter=lfs diff=lfs merge=lfs -text
+ FewGLUE_32dev/cb/dev32.jsonl filter=lfs diff=lfs merge=lfs -text
+ FewGLUE_32dev/cb/train.jsonl filter=lfs diff=lfs merge=lfs -text
+ FewGLUE_32dev/copa/train.jsonl filter=lfs diff=lfs merge=lfs -text
+ FewGLUE_32dev/copa/val.jsonl filter=lfs diff=lfs merge=lfs -text
+ FewGLUE_32dev/copa/dev32.jsonl filter=lfs diff=lfs merge=lfs -text
+ FewGLUE_32dev/multirc/dev32.jsonl filter=lfs diff=lfs merge=lfs -text
+ FewGLUE_32dev/multirc/train.jsonl filter=lfs diff=lfs merge=lfs -text
+ FewGLUE_32dev/multirc/val.jsonl filter=lfs diff=lfs merge=lfs -text
+ FewGLUE_32dev/record/val.jsonl filter=lfs diff=lfs merge=lfs -text
+ FewGLUE_32dev/record/dev32.jsonl filter=lfs diff=lfs merge=lfs -text
+ FewGLUE_32dev/record/train.jsonl filter=lfs diff=lfs merge=lfs -text
+ FewGLUE_32dev/rte/val.jsonl filter=lfs diff=lfs merge=lfs -text
+ FewGLUE_32dev/rte/dev32.jsonl filter=lfs diff=lfs merge=lfs -text
+ FewGLUE_32dev/rte/train.jsonl filter=lfs diff=lfs merge=lfs -text
+ FewGLUE_32dev/wic/dev32.jsonl filter=lfs diff=lfs merge=lfs -text
+ FewGLUE_32dev/wic/train.jsonl filter=lfs diff=lfs merge=lfs -text
+ FewGLUE_32dev/wic/val.jsonl filter=lfs diff=lfs merge=lfs -text
+ FewGLUE_32dev/wsc/train.jsonl filter=lfs diff=lfs merge=lfs -text
+ FewGLUE_32dev/wsc/val.jsonl filter=lfs diff=lfs merge=lfs -text
+ FewGLUE_32dev/wsc/dev32.jsonl filter=lfs diff=lfs merge=lfs -text
FewGLUE_32dev/.DS_Store ADDED
Binary file (6.15 kB)
 
FewGLUE_32dev/README.md ADDED
@@ -0,0 +1,15 @@
+ # FewGLUE_32dev
+ 
+ This repository contains the FewGLUE_32dev dataset, an extension of [FewGLUE](https://github.com/timoschick/fewglue) that allows NLU few-shot learning tasks to be benchmarked under a new 32-sample-dev setting. [Previous work](https://arxiv.org/abs/2012.15723) has shown that using larger development sets confers a significant advantage beyond the few-shot setting. FewGLUE_32dev is built by adding, for each task, a few-shot dev set of 32 examples randomly selected from the otherwise unused portion of the original SuperGLUE training sets.
+ 
+ 
+ ### Data Format
+ 
+ The data files follow the exact same format as the [SuperGLUE task files](https://super.gluebenchmark.com/tasks).
+ 
+ 
+ ### Structure
+ 
+ For each SuperGLUE task `T`, the directory `FewGLUE_32dev/T` contains the 32-sample-dev file (`dev32.jsonl`), which consists of 32 examples for few-shot validation.
+ 
+ To perform few-shot learning under the 32-dev setting, the following files are also required: the FewGLUE train files (`train.jsonl`) [[download](https://github.com/timoschick/fewglue)] and the SuperGLUE validation/test files (`val.jsonl`/`test.jsonl`) [[download](https://super.gluebenchmark.com/tasks)].
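
A minimal usage sketch (not part of the committed README; field names vary per task): once the LFS files are pulled, each `dev32.jsonl` can be read directly with the standard library.

```python
import json

# Each line of dev32.jsonl is one JSON example in SuperGLUE format.
with open("FewGLUE_32dev/rte/dev32.jsonl", encoding="utf-8") as f:
    examples = [json.loads(line) for line in f]

print(len(examples))       # 32 few-shot validation examples
print(examples[0].keys())  # e.g. premise/hypothesis/label/idx for RTE
```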
FewGLUE_32dev/boolq/.DS_Store ADDED
Binary file (6.15 kB)

FewGLUE_32dev/boolq/dev32.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0af324b658a72fdd7767aa3c27c1dfafbc03e8e987a148b29e42a72d84b9f9e3
+ size 21820
FewGLUE_32dev/boolq/train.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:317245ba0842129234a368274701dfc196139aa627eff44aefc827c6888935e1
+ size 24840
FewGLUE_32dev/boolq/val.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0c86a5045886e5795fe9052003873f7d94b88ed3028a33007c51d99e44fd66d9
+ size 2254565
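
These `.jsonl` entries are Git LFS pointer files: the repository itself stores only the `version`/`oid`/`size` triplet, and the actual JSONL content is fetched from LFS storage on checkout. A minimal sketch of reading such a pointer (an illustrative helper, not part of the commit):

```python
def parse_lfs_pointer(path: str) -> dict:
    """Parse a Git LFS pointer file into its key/value fields."""
    fields = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value  # e.g. "oid" -> "sha256:0af324..."
    return fields
```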
FewGLUE_32dev/cb/.DS_Store ADDED
Binary file (6.15 kB)

FewGLUE_32dev/cb/dev32.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ec7e1bfe37a7ea4514fc2335bfa3b35be77bfe084aae2e7ca55f765aaee41337
+ size 11703
FewGLUE_32dev/cb/train.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e6f446e5f117cbfbcd156d8221d163cc370924fa594a1d67f44a151ce5c171b2
+ size 13510
FewGLUE_32dev/cb/val.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c6125587e9601560cb16ea223c33964c5a915568d2af68a81f651b6ef25395ec
+ size 24598
FewGLUE_32dev/copa/.DS_Store ADDED
Binary file (6.15 kB)

FewGLUE_32dev/copa/dev32.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eb43b015d061738467f284c60049ef0afcaf707ec8e528ab3c5c4285a5f5618a
+ size 5874
FewGLUE_32dev/copa/train.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9955ae6e0ee109c96c00c0957d5873c0cb4ea9aced83b6a15b15258bb64b7dd5
+ size 5818
FewGLUE_32dev/copa/val.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fa61467cc251010178ed72f8ca82a0fceefc4ca9a85f87ec3b6102955e1a1f1a
+ size 18169
FewGLUE_32dev/multirc/.DS_Store ADDED
Binary file (6.15 kB)

FewGLUE_32dev/multirc/dev32.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3734d2cac8d1e8d13e2f5e13bec727be7576b45453e9fd765e2075e163f9653d
+ size 65041
FewGLUE_32dev/multirc/train.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e584db1b55f7da7c1e6a2d0a6b56eb6edcaff26e69393d628390e70c39ccb561
+ size 64111
FewGLUE_32dev/multirc/val.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d2b480195b15574ac00d52c4479f630bc161a7ab2de77e970fd00652846953ee
+ size 550086
FewGLUE_32dev/record/.DS_Store ADDED
Binary file (6.15 kB)

FewGLUE_32dev/record/dev32.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c420c9c49ee310d8648e64893c72b8f69b94085d3207c303787c24409c438663
+ size 60220
FewGLUE_32dev/record/train.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:22bbee303ffd8e2c29f36d253c14a1c3d072ceab18c31481cea8ba3ea28d89ca
+ size 62448
FewGLUE_32dev/record/val.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ed228700e8ffc6a5af3209a7faf334c4ea61d4f07e79806b693504e6a7dbafb2
+ size 15236903
FewGLUE_32dev/rte/.DS_Store ADDED
Binary file (6.15 kB)

FewGLUE_32dev/rte/dev32.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a3ba6200d2f27691d927ab97d387292b045316956f877fbfccc9a4658af2b351
+ size 14751
FewGLUE_32dev/rte/train.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:91a83197e3a286a8bf102e483646f87cd8b1e4c9c5ccd893bc0b9dba0643d580
+ size 12775
FewGLUE_32dev/rte/val.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ab990507bf5f35dc176cba7770a72bfa2c50a88ed72b8cc95cfbf9db95ab1af1
+ size 104895
FewGLUE_32dev/wic/.DS_Store ADDED
Binary file (6.15 kB)

FewGLUE_32dev/wic/dev32.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3df4ca565f109be9476caa51884ba13c5f2a33c939898cf80bb53e09fdc73062
+ size 7328
FewGLUE_32dev/wic/train.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b82c0c3f79d2f19bdd3c894f90f9deff0ebff787822a99ff6420db733c2a88f0
+ size 7477
FewGLUE_32dev/wic/val.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9f7207c88beba42cbc139de4c62ce05b23bffc189b3d561594b160e0c3920c0b
+ size 146302
FewGLUE_32dev/wsc/.DS_Store ADDED
Binary file (6.15 kB)

FewGLUE_32dev/wsc/dev32.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2bbaca5c995778e7e14f389f1fe9a4be9a58d99e66db0d73d868ab66564b9da7
+ size 8368
FewGLUE_32dev/wsc/train.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e814e1a9a781c1e4b1ad98151f09f2e9326a3db51f9c52639cb0f8b1f428ad47
+ size 7947
FewGLUE_32dev/wsc/val.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e5c685ea4bdc347354036a28176459f6feeb0b6870375ecfb3d6ddb1f0e28bef
+ size 31167
few_glue.py ADDED
@@ -0,0 +1,580 @@
+ # coding=utf-8
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ 
+ # Lint as: python3
+ """The FewGLUE benchmark."""
+ 
+ 
+ import json
+ import os
+ 
+ import datasets
+ 
+ 
+ _SUPER_GLUE_CITATION = """\
+ @article{wang2019superglue,
+   title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
+   author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
+   journal={arXiv preprint arXiv:1905.00537},
+   year={2019}
+ }
+ Note that each SuperGLUE dataset has its own citation. Please see the source to
+ get the correct citation for each contained dataset.
+ """
+ 
+ _GLUE_DESCRIPTION = """\
+ SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
+ GLUE with a new set of more difficult language understanding tasks, improved
+ resources, and a new public leaderboard.
+ """
+ 
+ _BOOLQ_DESCRIPTION = """\
+ BoolQ (Boolean Questions, Clark et al., 2019a) is a QA task where each example consists of a short
+ passage and a yes/no question about the passage. The questions are provided anonymously and
+ unsolicited by users of the Google search engine, and afterwards paired with a paragraph from a
+ Wikipedia article containing the answer. Following the original work, we evaluate with accuracy."""
+ 
+ _CB_DESCRIPTION = """\
+ The CommitmentBank (De Marneffe et al., 2019) is a corpus of short texts in which at least
+ one sentence contains an embedded clause. Each of these embedded clauses is annotated with the
+ degree to which we expect that the person who wrote the text is committed to the truth of the clause.
+ The resulting task is framed as three-class textual entailment on examples that are drawn from the Wall
+ Street Journal, fiction from the British National Corpus, and Switchboard. Each example consists
+ of a premise containing an embedded clause, and the corresponding hypothesis is the extraction of
+ that clause. We use a subset of the data that had inter-annotator agreement above 0.85. The data is
+ imbalanced (relatively fewer neutral examples), so we evaluate using accuracy and F1, where for
+ multi-class F1 we compute the unweighted average of the F1 per class."""
+ 
+ _COPA_DESCRIPTION = """\
+ The Choice Of Plausible Alternatives (COPA, Roemmele et al., 2011) dataset is a causal
+ reasoning task in which a system is given a premise sentence and two possible alternatives. The
+ system must choose the alternative which has the more plausible causal relationship with the premise.
+ The method used for the construction of the alternatives ensures that the task requires causal reasoning
+ to solve. Examples either deal with alternative possible causes or alternative possible effects of the
+ premise sentence, accompanied by a simple question disambiguating between the two instance
+ types for the model. All examples are handcrafted and focus on topics from online blogs and a
+ photography-related encyclopedia. Following the recommendation of the authors, we evaluate using
+ accuracy."""
+ 
+ _RECORD_DESCRIPTION = """\
+ ReCoRD (Reading Comprehension with Commonsense Reasoning Dataset, Zhang et al., 2018) is a
+ multiple-choice QA task. Each example consists of a news article and a Cloze-style question about
+ the article in which one entity is masked out. The system must predict the masked-out entity from a
+ given list of possible entities in the provided passage, where the same entity may be expressed using
+ multiple different surface forms, all of which are considered correct. Articles are drawn from CNN
+ and Daily Mail. Following the original work, we evaluate with max (over all mentions) token-level
+ F1 and exact match (EM)."""
+ 
+ _RTE_DESCRIPTION = """\
+ The Recognizing Textual Entailment (RTE) datasets come from a series of annual competitions
+ on textual entailment, the problem of predicting whether a given premise sentence entails a given
+ hypothesis sentence (also known as natural language inference, NLI). RTE was previously included
+ in GLUE, and we use the same data and format as before: we merge data from RTE1 (Dagan
+ et al., 2006), RTE2 (Bar Haim et al., 2006), RTE3 (Giampiccolo et al., 2007), and RTE5 (Bentivogli
+ et al., 2009). All datasets are combined and converted to two-class classification: entailment and
+ not_entailment. Of all the GLUE tasks, RTE was among those that benefited from transfer learning
+ the most, jumping from near random-chance performance (~56%) at the time of GLUE's launch to
+ 85% accuracy (Liu et al., 2019c) at the time of writing. Given the eight point gap with respect to
+ human performance, however, the task is not yet solved by machines, and we expect the remaining
+ gap to be difficult to close."""
+ 
+ _MULTIRC_DESCRIPTION = """\
+ The Multi-Sentence Reading Comprehension dataset (MultiRC, Khashabi et al., 2018)
+ is a true/false question-answering task. Each example consists of a context paragraph, a question
+ about that paragraph, and a list of possible answers to that question which must be labeled as true or
+ false. Question-answering (QA) is a popular problem with many datasets. We use MultiRC because
+ of a number of desirable properties: (i) each question can have multiple possible correct answers,
+ so each question-answer pair must be evaluated independently of other pairs, (ii) the questions are
+ designed such that answering each question requires drawing facts from multiple context sentences,
+ and (iii) the question-answer pair format more closely matches the API of other SuperGLUE tasks
+ than span-based extractive QA does. The paragraphs are drawn from seven domains including news,
+ fiction, and historical text."""
+ 
+ _WIC_DESCRIPTION = """\
+ The Word-in-Context (WiC, Pilehvar and Camacho-Collados, 2019) dataset supports a word
+ sense disambiguation task cast as binary classification over sentence pairs. Given two sentences and a
+ polysemous (sense-ambiguous) word that appears in both sentences, the task is to determine whether
+ the word is used with the same sense in both sentences. Sentences are drawn from WordNet (Miller,
+ 1995), VerbNet (Schuler, 2005), and Wiktionary. We follow the original work and evaluate using
+ accuracy."""
+ 
+ _WSC_DESCRIPTION = """\
+ The Winograd Schema Challenge (WSC, Levesque et al., 2012) is a reading comprehension
+ task in which a system must read a sentence with a pronoun and select the referent of that pronoun
+ from a list of choices. Given the difficulty of this task and the headroom still left, we have included
+ WSC in SuperGLUE and recast the dataset into its coreference form. The task is cast as a binary
+ classification problem, as opposed to N-way multiple choice, in order to isolate the model's ability to
+ understand the coreference links within a sentence, as opposed to various other strategies that may
+ come into play in multiple-choice conditions. With that in mind, we create a split with 65% negative
+ majority class in the validation set, reflecting the distribution of the hidden test set, and 52% negative
+ class in the training set. The training and validation examples are drawn from the original Winograd
+ Schema dataset (Levesque et al., 2012), as well as those distributed by the affiliated organization
+ Commonsense Reasoning. The test examples are derived from fiction books and have been shared
+ with us by the authors of the original dataset. Previously, a version of WSC recast as NLI was included
+ in GLUE, known as WNLI. No substantial progress was made on WNLI, with many submissions
+ opting to submit only majority-class predictions. WNLI was made especially difficult due to an
+ adversarial train/dev split: premise sentences that appeared in the training set sometimes appeared
+ in the development set with a different hypothesis and a flipped label. If a system memorized the
+ training set without meaningfully generalizing, which was easy due to the small size of the training
+ set, it could perform far below chance on the development set. We remove this adversarial design
+ in the SuperGLUE version of WSC by ensuring that no sentences are shared between the training,
+ validation, and test sets.
+ However, the validation and test sets come from different domains, with the validation set consisting
+ of ambiguous examples such that changing one non-noun phrase word will change the coreference
+ dependencies in the sentence. The test set consists only of more straightforward examples, with a
+ high number of noun phrases (and thus more choices for the model), but low to no ambiguity."""
+ 
+ _AXB_DESCRIPTION = """\
+ An expert-constructed diagnostic dataset that automatically tests models for a broad range of
+ linguistic, commonsense, and world knowledge. Each example in this broad-coverage diagnostic is a
+ sentence pair labeled with a three-way entailment relation (entailment, neutral, or contradiction) and
+ tagged with labels that indicate the phenomena that characterize the relationship between the two
+ sentences. Submissions to the GLUE leaderboard are required to include predictions from the
+ submission's MultiNLI classifier on the diagnostic dataset, and analyses of the results were shown
+ alongside the main leaderboard. Since this broad-coverage diagnostic task has proved difficult for
+ top models, we retain it in SuperGLUE. However, since MultiNLI is not part of SuperGLUE, we
+ collapse contradiction and neutral into a single not_entailment label, and request that submissions
+ include predictions on the resulting set from the model used for the RTE task.
+ """
+ 
+ _AXG_DESCRIPTION = """\
+ Winogender is designed to measure gender bias in coreference resolution systems. We use the
+ Diverse Natural Language Inference Collection (DNC; Poliak et al., 2018) version that casts
+ Winogender as a textual entailment task. Each example consists of a premise sentence with a male or
+ female pronoun and a hypothesis giving a possible antecedent of the pronoun. Examples occur in
+ minimal pairs, where the only difference between an example and its pair is the gender of the pronoun
+ in the premise. Performance on Winogender is measured with both accuracy and the gender parity
+ score: the percentage of minimal pairs for which the predictions are the same. We note that a system
+ can trivially obtain a perfect gender parity score by guessing the same class for all examples, so a
+ high gender parity score is meaningless unless accompanied by high accuracy. As a diagnostic test of
+ gender bias, we view the schemas as having high positive predictive value and low negative predictive
+ value; that is, they may demonstrate the presence of gender bias in a system, but not prove its absence.
+ """
+ 
+ _BOOLQ_CITATION = """\
+ @inproceedings{clark2019boolq,
+   title={BoolQ: Exploring the Surprising Difficulty of Natural Yes/No Questions},
+   author={Clark, Christopher and Lee, Kenton and Chang, Ming-Wei and Kwiatkowski, Tom and Collins, Michael and Toutanova, Kristina},
+   booktitle={NAACL},
+   year={2019}
+ }"""
+ 
+ _CB_CITATION = """\
+ @article{de marneff_simons_tonhauser_2019,
+   title={The CommitmentBank: Investigating projection in naturally occurring discourse},
+   journal={Proceedings of Sinn und Bedeutung 23},
+   author={De Marneffe, Marie-Catherine and Simons, Mandy and Tonhauser, Judith},
+   year={2019}
+ }"""
+ 
+ _COPA_CITATION = """\
+ @inproceedings{roemmele2011choice,
+   title={Choice of plausible alternatives: An evaluation of commonsense causal reasoning},
+   author={Roemmele, Melissa and Bejan, Cosmin Adrian and Gordon, Andrew S},
+   booktitle={2011 AAAI Spring Symposium Series},
+   year={2011}
+ }"""
+ 
+ _RECORD_CITATION = """\
+ @article{zhang2018record,
+   title={ReCoRD: Bridging the gap between human and machine commonsense reading comprehension},
+   author={Zhang, Sheng and Liu, Xiaodong and Liu, Jingjing and Gao, Jianfeng and Duh, Kevin and Van Durme, Benjamin},
+   journal={arXiv preprint arXiv:1810.12885},
+   year={2018}
+ }"""
+ 
+ _RTE_CITATION = """\
+ @inproceedings{dagan2005pascal,
+   title={The PASCAL recognising textual entailment challenge},
+   author={Dagan, Ido and Glickman, Oren and Magnini, Bernardo},
+   booktitle={Machine Learning Challenges Workshop},
+   pages={177--190},
+   year={2005},
+   organization={Springer}
+ }
+ @inproceedings{bar2006second,
+   title={The second PASCAL recognising textual entailment challenge},
+   author={Bar-Haim, Roy and Dagan, Ido and Dolan, Bill and Ferro, Lisa and Giampiccolo, Danilo and Magnini, Bernardo and Szpektor, Idan},
+   booktitle={Proceedings of the second PASCAL challenges workshop on recognising textual entailment},
+   volume={6},
+   number={1},
+   pages={6--4},
+   year={2006},
+   organization={Venice}
+ }
+ @inproceedings{giampiccolo2007third,
+   title={The third PASCAL recognizing textual entailment challenge},
+   author={Giampiccolo, Danilo and Magnini, Bernardo and Dagan, Ido and Dolan, Bill},
+   booktitle={Proceedings of the ACL-PASCAL workshop on textual entailment and paraphrasing},
+   pages={1--9},
+   year={2007},
+   organization={Association for Computational Linguistics}
+ }
+ @inproceedings{bentivogli2009fifth,
+   title={The Fifth PASCAL Recognizing Textual Entailment Challenge},
+   author={Bentivogli, Luisa and Clark, Peter and Dagan, Ido and Giampiccolo, Danilo},
+   booktitle={TAC},
+   year={2009}
+ }"""
+ 
+ _MULTIRC_CITATION = """\
+ @inproceedings{MultiRC2018,
+   author={Daniel Khashabi and Snigdha Chaturvedi and Michael Roth and Shyam Upadhyay and Dan Roth},
+   title={Looking Beyond the Surface: A Challenge Set for Reading Comprehension over Multiple Sentences},
+   booktitle={Proceedings of North American Chapter of the Association for Computational Linguistics (NAACL)},
+   year={2018}
+ }"""
+ 
+ _WIC_CITATION = """\
+ @article{DBLP:journals/corr/abs-1808-09121,
+   author={Mohammad Taher Pilehvar and Jos{\'{e}} Camacho{-}Collados},
+   title={WiC: 10,000 Example Pairs for Evaluating Context-Sensitive Representations},
+   journal={CoRR},
+   volume={abs/1808.09121},
+   year={2018},
+   url={http://arxiv.org/abs/1808.09121},
+   archivePrefix={arXiv},
+   eprint={1808.09121},
+   timestamp={Mon, 03 Sep 2018 13:36:40 +0200},
+   biburl={https://dblp.org/rec/bib/journals/corr/abs-1808-09121},
+   bibsource={dblp computer science bibliography, https://dblp.org}
+ }"""
+ 
+ _WSC_CITATION = """\
+ @inproceedings{levesque2012winograd,
+   title={The Winograd schema challenge},
+   author={Levesque, Hector and Davis, Ernest and Morgenstern, Leora},
+   booktitle={Thirteenth International Conference on the Principles of Knowledge Representation and Reasoning},
+   year={2012}
+ }"""
+ 
+ _AXG_CITATION = """\
+ @inproceedings{rudinger-EtAl:2018:N18,
+   author={Rudinger, Rachel and Naradowsky, Jason and Leonard, Brian and {Van Durme}, Benjamin},
+   title={Gender Bias in Coreference Resolution},
+   booktitle={Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies},
+   month={June},
+   year={2018},
+   address={New Orleans, Louisiana},
+   publisher={Association for Computational Linguistics}
+ }
+ """
+ 
+ 
+ class FewGlueConfig(datasets.BuilderConfig):
+     """BuilderConfig for FewGLUE."""
+ 
+     def __init__(self, features, citation, url, label_classes=("False", "True"), **kwargs):
+         """BuilderConfig for FewGLUE.
+         Args:
+           features: `list[string]`, list of the features that will appear in the
+             feature dict. Should not include "label".
+           citation: `string`, citation for the data set.
+           url: `string`, url for information about the data set.
+           label_classes: `list[string]`, the list of classes for the label if the
+             label is present as a string. Non-string labels will be cast to either
+             'False' or 'True'.
+           **kwargs: keyword arguments forwarded to super.
+         """
+         # Version history:
+         # 1.0.2: Fixed non-determinism in ReCoRD.
+         # 1.0.1: Change from the pre-release trial version of SuperGLUE (v1.9) to
+         #        the full release (v2.0).
+         # 1.0.0: S3 (new shuffling, sharding and slicing mechanism).
+         # 0.0.2: Initial version.
+         super(FewGlueConfig, self).__init__(version=datasets.Version("1.0.2"), **kwargs)
+         self.features = features
+         self.label_classes = label_classes
+         self.citation = citation
+         self.url = url
+ 
+ class FewGlue(datasets.GeneratorBasedBuilder):
+     """The FewGLUE benchmark."""
+ 
+     BUILDER_CONFIGS = [
+         FewGlueConfig(
+             name="boolq",
+             description=_BOOLQ_DESCRIPTION,
+             features=["question", "passage"],
+             citation=_BOOLQ_CITATION,
+             url="https://github.com/google-research-datasets/boolean-questions",
+         ),
+         FewGlueConfig(
+             name="cb",
+             description=_CB_DESCRIPTION,
+             features=["premise", "hypothesis"],
+             label_classes=["entailment", "contradiction", "neutral"],
+             citation=_CB_CITATION,
+             url="https://github.com/mcdm/CommitmentBank",
+         ),
+         FewGlueConfig(
+             name="copa",
+             description=_COPA_DESCRIPTION,
+             label_classes=["choice1", "choice2"],
+             # Note that question will only be the X in the statement "What's
+             # the X for this?".
+             features=["premise", "choice1", "choice2", "question"],
+             citation=_COPA_CITATION,
+             url="http://people.ict.usc.edu/~gordon/copa.html",
+         ),
+         FewGlueConfig(
+             name="multirc",
+             description=_MULTIRC_DESCRIPTION,
+             features=["paragraph", "question", "answer"],
+             citation=_MULTIRC_CITATION,
+             url="https://cogcomp.org/multirc/",
+         ),
+         FewGlueConfig(
+             name="record",
+             description=_RECORD_DESCRIPTION,
+             # Note that entities and answers will be sequences of strings. Query
+             # will contain @placeholder as a substring, which represents the word
+             # to be substituted in.
+             features=["passage", "query", "entities", "answers"],
+             citation=_RECORD_CITATION,
+             url="https://sheng-z.github.io/ReCoRD-explorer/",
+         ),
+         FewGlueConfig(
+             name="rte",
+             description=_RTE_DESCRIPTION,
+             features=["premise", "hypothesis"],
+             label_classes=["entailment", "not_entailment"],
+             citation=_RTE_CITATION,
+             url="https://aclweb.org/aclwiki/Recognizing_Textual_Entailment",
+         ),
+         FewGlueConfig(
+             name="wic",
+             description=_WIC_DESCRIPTION,
+             # Note that start1, start2, end1, and end2 will be integers stored as
+             # datasets.Value('int32').
+             features=["word", "sentence1", "sentence2", "start1", "start2", "end1", "end2"],
+             citation=_WIC_CITATION,
+             url="https://pilehvar.github.io/wic/",
+         ),
+         FewGlueConfig(
+             name="wsc",
+             description=_WSC_DESCRIPTION,
+             # Note that span1_index and span2_index will be integers stored as
+             # datasets.Value('int32').
+             features=["text", "span1_index", "span2_index", "span1_text", "span2_text"],
+             citation=_WSC_CITATION,
+             url="https://cs.nyu.edu/faculty/davise/papers/WinogradSchemas/WS.html",
+         ),
+         FewGlueConfig(
+             name="wsc.fixed",
+             description=(
+                 _WSC_DESCRIPTION + "\n\nThis version fixes issues where the spans are not actually "
+                 "substrings of the text."
+             ),
+             # Note that span1_index and span2_index will be integers stored as
+             # datasets.Value('int32').
+             features=["text", "span1_index", "span2_index", "span1_text", "span2_text"],
+             citation=_WSC_CITATION,
+             url="https://cs.nyu.edu/faculty/davise/papers/WinogradSchemas/WS.html",
+         ),
+     ]
+ 
+     def _info(self):
+         features = {feature: datasets.Value("string") for feature in self.config.features}
+         if self.config.name.startswith("wsc"):
+             features["span1_index"] = datasets.Value("int32")
+             features["span2_index"] = datasets.Value("int32")
+         if self.config.name == "wic":
+             features["start1"] = datasets.Value("int32")
+             features["start2"] = datasets.Value("int32")
+             features["end1"] = datasets.Value("int32")
+             features["end2"] = datasets.Value("int32")
+         if self.config.name == "multirc":
+             features["idx"] = {
+                 "paragraph": datasets.Value("int32"),
+                 "question": datasets.Value("int32"),
+                 "answer": datasets.Value("int32"),
+             }
+         elif self.config.name == "record":
+             features["idx"] = {
+                 "passage": datasets.Value("int32"),
+                 "query": datasets.Value("int32"),
+             }
+         else:
+             features["idx"] = datasets.Value("int32")
+ 
+         if self.config.name == "record":
+             # Entities are the set of possible choices for the placeholder.
+             features["entities"] = datasets.features.Sequence(datasets.Value("string"))
+             # Answers are the subset of entities that are correct.
+             features["answers"] = datasets.features.Sequence(datasets.Value("string"))
+         else:
+             features["label"] = datasets.features.ClassLabel(names=self.config.label_classes)
+ 
+         return datasets.DatasetInfo(
+             description=_GLUE_DESCRIPTION + self.config.description,
+             features=datasets.Features(features),
+             homepage=self.config.url,
+             citation=self.config.citation + "\n" + _SUPER_GLUE_CITATION,
+         )
+ 
+     def _split_generators(self, dl_manager):
+         # The data ships with the repository, so nothing is downloaded here:
+         # each config reads directly from FewGLUE_32dev/<task>/. Note the split
+         # mapping: the 32-example dev32.jsonl serves as the validation split,
+         # and SuperGLUE's original validation file (val.jsonl) serves as the
+         # test split.
+         dl_dir = "FewGLUE_32dev"
+         dl_dir = os.path.join(dl_dir, self.config.name)
+ 
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "data_file": os.path.join(dl_dir, "train.jsonl"),
+                     "split": datasets.Split.TRAIN,
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "data_file": os.path.join(dl_dir, "dev32.jsonl"),
+                     "split": datasets.Split.VALIDATION,
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "data_file": os.path.join(dl_dir, "val.jsonl"),
+                     "split": datasets.Split.TEST,
+                 },
+             ),
+         ]
+ 
+     def _generate_examples(self, data_file, split):
+         with open(data_file, encoding="utf-8") as f:
+             for line in f:
+                 row = json.loads(line)
+ 
+                 if self.config.name == "multirc":
+                     paragraph = row["passage"]
+                     for question in paragraph["questions"]:
+                         for answer in question["answers"]:
+                             label = answer.get("label")
+                             key = "%s_%s_%s" % (row["idx"], question["idx"], answer["idx"])
+                             yield key, {
+                                 "paragraph": paragraph["text"],
+                                 "question": question["question"],
+                                 "answer": answer["text"],
+                                 "label": -1 if label is None else _cast_label(bool(label)),
+                                 "idx": {"paragraph": row["idx"], "question": question["idx"], "answer": answer["idx"]},
+                             }
+                 elif self.config.name == "record":
+                     passage = row["passage"]
+                     for qa in row["qas"]:
+                         yield qa["idx"], {
+                             "passage": passage["text"],
+                             "query": qa["query"],
+                             "entities": _get_record_entities(passage),
+                             "answers": _get_record_answers(qa),
+                             "idx": {"passage": row["idx"], "query": qa["idx"]},
+                         }
+                 else:
+                     if self.config.name.startswith("wsc"):
+                         row.update(row["target"])
+                     example = {feature: row[feature] for feature in self.config.features}
+                     if self.config.name == "wsc.fixed":
+                         example = _fix_wst(example)
+                     example["idx"] = row["idx"]
+ 
+                     if "label" in row:
+                         if self.config.name == "copa":
+                             example["label"] = "choice2" if row["label"] else "choice1"
+                         else:
+                             example["label"] = _cast_label(row["label"])
+                     else:
+                         assert split == datasets.Split.TEST, row
+                         example["label"] = -1
+                     yield example["idx"], example
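
# Sketch (not part of the committed file): the MultiRC branch above implies
# input rows nested roughly as follows, with a label attached per answer:
#
#   {"idx": 0,
#    "passage": {"text": "...",
#                "questions": [{"idx": 0,
#                               "question": "...",
#                               "answers": [{"idx": 0, "text": "...", "label": 1}]}]}}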
+ 
+ 
+ def _fix_wst(ex):
+     """Fixes most cases where spans are not actually substrings of text."""
+ 
+     def _fix_span_text(k):
+         """Fixes a single span."""
+         text = ex[k + "_text"]
+         index = ex[k + "_index"]
+ 
+         if text in ex["text"]:
+             return
+ 
+         if text in ("Kamenev and Zinoviev", "Kamenev, Zinoviev, and Stalin"):
+             # There is no way to correct these examples since the subjects have
+             # intervening text.
+             return
+ 
+         if "theyscold" in text:
+             ex["text"] = ex["text"].replace("theyscold", "they scold")
+             ex["span2_index"] = 10
+         # Make sure case of the first words match.
+         first_word = ex["text"].split()[index]
+         if first_word[0].islower():
+             text = text[0].lower() + text[1:]
+         else:
+             text = text[0].upper() + text[1:]
+         # Remove punctuation in span.
+         text = text.rstrip(".")
+         # Replace incorrect whitespace character in span.
+         text = text.replace("\n", " ")
+         ex[k + "_text"] = text
+         assert ex[k + "_text"] in ex["text"], ex
+ 
+     _fix_span_text("span1")
+     _fix_span_text("span2")
+     return ex
+ 
+ 
+ def _cast_label(label):
+     """Converts the label into the appropriate string version."""
+     if isinstance(label, str):
+         return label
+     elif isinstance(label, bool):
+         return "True" if label else "False"
+     elif isinstance(label, int):
+         assert label in (0, 1)
+         return str(label)
+     else:
+         raise ValueError("Invalid label format.")
+ 
+ 
+ def _get_record_entities(passage):
+     """Returns the unique set of entities."""
+     text = passage["text"]
+     entities = set()
+     for entity in passage["entities"]:
+         entities.add(text[entity["start"] : entity["end"] + 1])
+     return sorted(entities)
+ 
+ 
+ def _get_record_answers(qa):
+     """Returns the unique set of answers."""
+     if "answers" not in qa:
+         return []
+     answers = set()
+     for answer in qa["answers"]:
+         answers.add(answer["text"])
+     return sorted(answers)
+ 
+ 
+ def _get_task_name_from_data_url(data_url):
+     return data_url.split("/")[-1].split(".")[0]
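
A minimal loading sketch (illustrative; assumes a `datasets` version that still supports local loading scripts and that the script sits next to the `FewGLUE_32dev/` directory). Note the split mapping the script implements: `train` is FewGLUE's `train.jsonl`, `validation` is the new 32-example `dev32.jsonl`, and `test` is SuperGLUE's original `val.jsonl`.

```python
import datasets

# Load one task's few-shot splits via the local few_glue.py script.
boolq = datasets.load_dataset("few_glue.py", "boolq")

print(boolq["train"].num_rows)       # FewGLUE training examples (32 per task)
print(boolq["validation"].num_rows)  # the new dev32 examples (32 per task)
print(boolq["test"].num_rows)        # SuperGLUE validation examples
```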