ghomasHudson committed on
Commit
6a95b0d
·
1 Parent(s): 2f0d5d9

Create vlsp.py

Browse files
Files changed (1) hide show
  1. vlsp.py +81 -0
vlsp.py ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """VLSP: Very Long Scientific Papers"""
16
+
17
+
18
+ import os
19
+ import glob
20
+
21
+ import datasets
22
+
23
+
24
# BibTeX citation for the dataset (none published yet, so left empty).
_CITATION = """"""
# Short human-readable description shown on the dataset card.
_DESCRIPTION = """\
Very Long version of the scientific papers summarization dataset. Only includes theses over 10,000 tokens long.
"""
# Project page with further documentation.
_HOMEPAGE = "https://github.com/ghomasHudson/very_long_scientific_papers"
# Zip archive of the repository's master branch; contains the data files.
_URL = "https://github.com/ghomasHudson/very_long_scientific_papers/archive/master.zip"
30
+
31
class VLSP(datasets.GeneratorBasedBuilder):
    """VLSP: Very Long Scientific Papers.

    A summarization dataset of scientific theses longer than 10,000 tokens.
    Each example pairs an article body with one or more reference abstracts.
    """

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="arxiv", description="Arxiv theses"),
    ]

    DEFAULT_CONFIG_NAME = "arxiv"

    def _info(self):
        """Return the dataset metadata (features schema, description, homepage, citation)."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    # A sequence because an article may have both a regular
                    # abstract and a long abstract as reference summaries.
                    "abstract": datasets.features.Sequence(
                        datasets.Value("string")
                    ),
                    "article": datasets.Value("string"),
                }
            ),
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the repository archive and wire up the single TEST split.

        The archive unpacks to ``very_long_scientific_papers-master``; the
        deduplicated test files live in its ``deduped-test`` directory.
        """
        data_dir = dl_manager.download_and_extract(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "very_long_scientific_papers-master", "deduped-test"),
                },
            )
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs from the ``*.main.txt`` article files.

        For each article file, the matching ``.abstract.`` and
        ``.abstract-long.`` files (when present) are collected as the list of
        reference summaries.

        Args:
            filepath: Directory containing the extracted split files.

        Yields:
            Tuples of an integer key and a dict with ``article`` and
            ``abstract`` fields matching the features declared in `_info`.
        """
        # sorted() makes the enumeration keys deterministic; raw glob order
        # is filesystem-dependent.
        for key, fn in enumerate(sorted(glob.glob(os.path.join(filepath, "*.main.txt")))):
            summary = []
            # Collect whichever reference summaries exist for this article.
            for marker in (".abstract.", ".abstract-long."):
                summ_fn = fn.replace(".main.", marker)
                if os.path.exists(summ_fn):
                    # Context manager + explicit encoding: the original
                    # open(...).read() leaked the handle and used the
                    # platform-default encoding.
                    with open(summ_fn, encoding="utf-8") as f:
                        summary.append(f.read())

            with open(fn, encoding="utf-8") as f:
                article = f.read()

            yield key, {
                "article": article,
                "abstract": summary,
            }