Datasets:

Modalities:
Text
Libraries:
Datasets
dibyaaaaax committed on
Commit
c8d4e46
·
1 Parent(s): 2e3fc19

Upload www.py

Browse files
Files changed (1) hide show
  1. www.py +147 -0
www.py ADDED
@@ -0,0 +1,147 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import datasets
3
+
4
+ # _SPLIT = ['test']
5
+ _CITATION = """\
6
+ @inproceedings{caragea-etal-2014-citation,
7
+ title = "Citation-Enhanced Keyphrase Extraction from Research Papers: A Supervised Approach",
8
+ author = "Caragea, Cornelia and
9
+ Bulgarov, Florin Adrian and
10
+ Godea, Andreea and
11
+ Das Gollapalli, Sujatha",
12
+ booktitle = "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing ({EMNLP})",
13
+ month = oct,
14
+ year = "2014",
15
+ address = "Doha, Qatar",
16
+ publisher = "Association for Computational Linguistics",
17
+ url = "https://aclanthology.org/D14-1150",
18
+ doi = "10.3115/v1/D14-1150",
19
+ pages = "1435--1446",
20
+ }
21
+
22
+
23
+ """
24
+
25
+ _DESCRIPTION = """\
26
+
27
+ """
28
+
29
+ _HOMEPAGE = ""
30
+
31
+ # TODO: Add the licence for the dataset here if you can find it
32
+ _LICENSE = ""
33
+
34
+ # TODO: Add link to the official dataset URLs here
35
+
36
+ _URLS = {
37
+ "test": "test.jsonl"
38
+ }
39
+
40
+
41
+ # TODO: Name of the dataset usually match the script name with CamelCase instead of snake_case
42
+ class WWW(datasets.GeneratorBasedBuilder):
43
+ """TODO: Short description of my dataset."""
44
+
45
+ VERSION = datasets.Version("0.0.1")
46
+
47
+ BUILDER_CONFIGS = [
48
+ datasets.BuilderConfig(name="extraction", version=VERSION,
49
+ description="This part of my dataset covers extraction"),
50
+ datasets.BuilderConfig(name="generation", version=VERSION,
51
+ description="This part of my dataset covers generation"),
52
+ datasets.BuilderConfig(name="raw", version=VERSION, description="This part of my dataset covers the raw data"),
53
+ ]
54
+
55
+ DEFAULT_CONFIG_NAME = "extraction"
56
+
57
+ def _info(self):
58
+ if self.config.name == "extraction": # This is the name of the configuration selected in BUILDER_CONFIGS above
59
+ features = datasets.Features(
60
+ {
61
+ "id": datasets.Value("int64"),
62
+ "document": datasets.features.Sequence(datasets.Value("string")),
63
+ "doc_bio_tags": datasets.features.Sequence(datasets.Value("string"))
64
+
65
+ }
66
+ )
67
+ elif self.config.name == "generation":
68
+ features = datasets.Features(
69
+ {
70
+ "id": datasets.Value("int64"),
71
+ "document": datasets.features.Sequence(datasets.Value("string")),
72
+ "extractive_keyphrases": datasets.features.Sequence(datasets.Value("string")),
73
+ "abstractive_keyphrases": datasets.features.Sequence(datasets.Value("string"))
74
+
75
+ }
76
+ )
77
+ else:
78
+ features = datasets.Features(
79
+ {
80
+ "id": datasets.Value("int64"),
81
+ "document": datasets.features.Sequence(datasets.Value("string")),
82
+ "doc_bio_tags": datasets.features.Sequence(datasets.Value("string")),
83
+ "extractive_keyphrases": datasets.features.Sequence(datasets.Value("string")),
84
+ "abstractive_keyphrases": datasets.features.Sequence(datasets.Value("string")),
85
+ "other_metadata": datasets.features.Sequence(
86
+ {
87
+ "text": datasets.features.Sequence(datasets.Value("string")),
88
+ "bio_tags": datasets.features.Sequence(datasets.Value("string"))
89
+ }
90
+ )
91
+
92
+ }
93
+ )
94
+ return datasets.DatasetInfo(
95
+ # This is the description that will appear on the datasets page.
96
+ description=_DESCRIPTION,
97
+ # This defines the different columns of the dataset and their types
98
+ features=features,
99
+ homepage=_HOMEPAGE,
100
+ # License for the dataset if available
101
+ license=_LICENSE,
102
+ # Citation for the dataset
103
+ citation=_CITATION,
104
+ )
105
+
106
+ def _split_generators(self, dl_manager):
107
+
108
+ data_dir = dl_manager.download_and_extract(_URLS)
109
+ return [
110
+ datasets.SplitGenerator(
111
+ name=datasets.Split.TEST,
112
+ # These kwargs will be passed to _generate_examples
113
+ gen_kwargs={
114
+ "filepath": data_dir['test'],
115
+ "split": "test"
116
+ },
117
+ ),
118
+ ]
119
+
120
+ # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
121
+ def _generate_examples(self, filepath, split):
122
+ with open(filepath, encoding="utf-8") as f:
123
+ for key, row in enumerate(f):
124
+ data = json.loads(row)
125
+ if self.config.name == "extraction":
126
+ # Yields examples as (key, example) tuples
127
+ yield key, {
128
+ "id": data['paper_id'],
129
+ "document": data["document"],
130
+ "doc_bio_tags": data.get("doc_bio_tags")
131
+ }
132
+ elif self.config.name == "generation":
133
+ yield key, {
134
+ "id": data['paper_id'],
135
+ "document": data["document"],
136
+ "extractive_keyphrases": data.get("extractive_keyphrases"),
137
+ "abstractive_keyphrases": data.get("abstractive_keyphrases")
138
+ }
139
+ else:
140
+ yield key, {
141
+ "id": data['paper_id'],
142
+ "document": data["document"],
143
+ "doc_bio_tags": data.get("doc_bio_tags"),
144
+ "extractive_keyphrases": data.get("extractive_keyphrases"),
145
+ "abstractive_keyphrases": data.get("abstractive_keyphrases"),
146
+ "other_metadata": data["other_metadata"]
147
+ }