sileod committed on
Commit
4a802a0
·
1 Parent(s): b6e2ea0

Create xnli.py

Browse files
Files changed (1) hide show
  1. xnli.py +210 -0
xnli.py ADDED
@@ -0,0 +1,210 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ # Lint as: python3
17
+ """XNLI: The Cross-Lingual NLI Corpus."""
18
+
19
+
20
+ import collections
21
+ import csv
22
+ import os
23
+ from contextlib import ExitStack
24
+
25
+ import datasets
26
+
27
+
28
# BibTeX citation for the XNLI paper (Conneau et al., EMNLP 2018).
_CITATION = """\
@InProceedings{conneau2018xnli,
author = {Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin},
title = {XNLI: Evaluating Cross-lingual Sentence Representations},
booktitle = {Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing},
year = {2018},
publisher = {Association for Computational Linguistics},
location = {Brussels, Belgium},
}"""

# Human-readable summary surfaced through DatasetInfo / the dataset card.
_DESCRIPTION = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into a 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""

# Machine-translated MNLI training data: one TSV file per language.
_TRAIN_DATA_URL = "https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip"
# Human-translated dev/test data: a single multilingual TSV per split.
_TESTVAL_DATA_URL = "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip"

# All languages covered by XNLI; also the set used by the "all_languages" config.
_LANGUAGES = ("ar", "bg", "de", "el", "en", "es", "fr", "hi", "ru", "sw", "th", "tr", "ur", "vi", "zh")
57
+
58
+
59
class XnliConfig(datasets.BuilderConfig):
    """BuilderConfig for XNLI."""

    def __init__(self, language: str, languages=None, **kwargs):
        """BuilderConfig for XNLI.

        Args:
            language: One of ar,bg,de,el,en,es,fr,hi,ru,sw,th,tr,ur,vi,zh, or all_languages
            languages: Optional subset of languages; only honored when
                ``language`` is "all_languages" (defaults to all of them).
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)
        self.language = language
        if language == "all_languages":
            # Multilingual config: use the requested subset, or every language.
            self.languages = _LANGUAGES if languages is None else languages
        else:
            # Single-language config: exactly one language to load.
            self.languages = [language]
74
+
75
+
76
class Xnli(datasets.GeneratorBasedBuilder):
    """XNLI: The Cross-Lingual NLI Corpus. Version 1.0."""

    VERSION = datasets.Version("1.1.0", "")
    BUILDER_CONFIG_CLASS = XnliConfig
    # One config per individual language, plus an "all_languages" config that
    # yields aligned translations for every language at once.
    BUILDER_CONFIGS = [
        XnliConfig(
            name=lang,
            language=lang,
            version=datasets.Version("1.1.0", ""),
            description=f"Plain text import of XNLI for the {lang} language",
        )
        for lang in _LANGUAGES
    ] + [
        XnliConfig(
            name="all_languages",
            language="all_languages",
            version=datasets.Version("1.1.0", ""),
            description="Plain text import of XNLI for all languages",
        )
    ]

    def _info(self):
        """Return dataset metadata; the feature schema depends on the config.

        "all_languages" uses Translation/TranslationVariableLanguages features
        (dicts keyed by language code); single-language configs use plain strings.
        """
        if self.config.language == "all_languages":
            features = datasets.Features(
                {
                    "premise": datasets.Translation(
                        languages=_LANGUAGES,
                    ),
                    "hypothesis": datasets.TranslationVariableLanguages(
                        languages=_LANGUAGES,
                    ),
                    "label": datasets.ClassLabel(names=["entailment", "neutral", "contradiction"]),
                }
            )
        else:
            features = datasets.Features(
                {
                    "premise": datasets.Value("string"),
                    "hypothesis": datasets.Value("string"),
                    "label": datasets.ClassLabel(names=["entailment", "neutral", "contradiction"]),
                }
            )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            # No default supervised_keys (as we have to pass both premise
            # and hypothesis as input).
            supervised_keys=None,
            homepage="https://www.nyu.edu/projects/bowman/xnli/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download both archives and map them onto train/test/validation splits.

        Train comes from the machine-translated XNLI-MT archive (one TSV per
        configured language); dev/test come from the human-translated XNLI-1.0
        archive (one multilingual TSV per split).
        """
        dl_dirs = dl_manager.download_and_extract(
            {
                "train_data": _TRAIN_DATA_URL,
                "testval_data": _TESTVAL_DATA_URL,
            }
        )
        train_dir = os.path.join(dl_dirs["train_data"], "XNLI-MT-1.0", "multinli")
        testval_dir = os.path.join(dl_dirs["testval_data"], "XNLI-1.0")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepaths": [
                        os.path.join(train_dir, f"multinli.train.{lang}.tsv") for lang in self.config.languages
                    ],
                    "data_format": "XNLI-MT",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepaths": [os.path.join(testval_dir, "xnli.test.tsv")], "data_format": "XNLI"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepaths": [os.path.join(testval_dir, "xnli.dev.tsv")], "data_format": "XNLI"},
            ),
        ]

    def _generate_examples(self, data_format, filepaths):
        """Yield (key, example) pairs in the raw (text) form.

        Args:
            data_format: "XNLI-MT" for the per-language train TSVs, anything
                else for the multilingual dev/test TSVs.
            filepaths: TSV files to read for this split.
        """
        if self.config.language == "all_languages":
            if data_format == "XNLI-MT":
                # One train file per language; read them in lockstep so row i
                # of every file contributes to the same aligned example.
                with ExitStack() as stack:
                    files = [stack.enter_context(open(filepath, encoding="utf-8")) for filepath in filepaths]
                    readers = [csv.DictReader(file, delimiter="\t", quoting=csv.QUOTE_NONE) for file in files]
                    for row_idx, rows in enumerate(zip(*readers)):
                        yield row_idx, {
                            "premise": {lang: row["premise"] for lang, row in zip(self.config.languages, rows)},
                            "hypothesis": {lang: row["hypo"] for lang, row in zip(self.config.languages, rows)},
                            # The MT files spell the label "contradictory";
                            # normalize to the XNLI/ClassLabel name.
                            "label": rows[0]["label"].replace("contradictory", "contradiction"),
                        }
            else:
                # dev/test ship as one file covering all languages: group the
                # per-language rows by pairID, then emit one multilingual example.
                rows_per_pair_id = collections.defaultdict(list)
                for filepath in filepaths:
                    with open(filepath, encoding="utf-8") as f:
                        reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
                        for row in reader:
                            rows_per_pair_id[row["pairID"]].append(row)

                for rows in rows_per_pair_id.values():
                    premise = {row["language"]: row["sentence1"] for row in rows}
                    hypothesis = {row["language"]: row["sentence2"] for row in rows}
                    yield rows[0]["pairID"], {
                        "premise": premise,
                        "hypothesis": hypothesis,
                        "label": rows[0]["gold_label"],
                    }
        else:
            if data_format == "XNLI-MT":
                for file_idx, filepath in enumerate(filepaths):
                    # FIX: the original opened the file without ever closing it,
                    # leaking one handle per train file; use a context manager
                    # like every other branch of this method.
                    with open(filepath, encoding="utf-8") as file:
                        reader = csv.DictReader(file, delimiter="\t", quoting=csv.QUOTE_NONE)
                        for row_idx, row in enumerate(reader):
                            key = str(file_idx) + "_" + str(row_idx)
                            yield key, {
                                "premise": row["premise"],
                                "hypothesis": row["hypo"],
                                "label": row["label"].replace("contradictory", "contradiction"),
                            }
            else:
                for filepath in filepaths:
                    with open(filepath, encoding="utf-8") as f:
                        reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
                        for row in reader:
                            # Multilingual file: keep only this config's language.
                            if row["language"] == self.config.language:
                                yield row["pairID"], {
                                    "premise": row["sentence1"],
                                    "hypothesis": row["sentence2"],
                                    "label": row["gold_label"],
                                }