Update scrolls.py

scrolls.py (+15 -4):
@@ -158,6 +158,7 @@ _QUALITY_CITATION = """\
 }
 """
 
+
 class ScrollsConfig(datasets.BuilderConfig):
     """BuilderConfig for Scrolls."""
 
@@ -186,13 +187,16 @@ class ScrollsConfig(datasets.BuilderConfig):
         self.citation = citation
         self.url = url
 
+
 class QualityConfig(ScrollsConfig):
     def __init__(self, **kwargs):
         super().__init__(**kwargs)
         self.hard_only = False
 
+
 class Scrolls(datasets.GeneratorBasedBuilder):
     """The SuperGLUE benchmark."""
+
     features = ["id", "pid", "input", "output"]
     DEFAULT_WRITER_BATCH_SIZE = 1000  # because Narrative QA is a rather large dataset
     BUILDER_CONFIGS = [
@@ -251,7 +255,7 @@ class Scrolls(datasets.GeneratorBasedBuilder):
             data_url="https://scrolls-tau.s3.us-east-2.amazonaws.com/quality.zip",
             citation=_QUALITY_DESCRIPTION,
             url="https://github.com/nyu-mll/quality",
-        )
+        ),
     ]
 
     def _info(self):
@@ -268,6 +272,12 @@ class Scrolls(datasets.GeneratorBasedBuilder):
         dl_dir = dl_manager.download_and_extract(self.config.data_url)
         task_name = _get_task_name_from_data_url(self.config.data_url)
         dl_dir = os.path.join(dl_dir, task_name)
+
+        data_files = {} if self.config.data_files is not None else None
+        if data_files is not None:
+            for split, paths in self.config.data_files.items():
+                data_files[split] = paths[0]
+
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
@@ -286,7 +296,7 @@ class Scrolls(datasets.GeneratorBasedBuilder):
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 gen_kwargs={
-                    "data_file": os.path.join(dl_dir, "test.jsonl"),
+                    "data_file": os.path.join(dl_dir, "test.jsonl") if data_files is None else data_files["test"],
                     "split": datasets.Split.TEST,
                 },
             ),
@@ -298,11 +308,12 @@ class Scrolls(datasets.GeneratorBasedBuilder):
                 row = json.loads(line)
 
                 if self.config.name == "quality":
-
+                    is_hard = row.pop("is_hard", False)
+                    if self.config.hard_only and is_hard:
                         continue
 
                 yield row["pid"], row
 
 
 def _get_task_name_from_data_url(data_url):
-    return data_url.split("/")[-1].split(".")[0]
+    return data_url.split("/")[-1].split(".")[0]