Datasets · Modalities: Text · Size: < 1K · Libraries: Datasets
rajivratn committed · Commit 4d7801a · 1 Parent(s): 97e7383

Update test_ldkp.py

Files changed (1): test_ldkp.py (+0 -153)
test_ldkp.py CHANGED
@@ -1,153 +0,0 @@
-import csv
-import json
-import os
-
-import datasets
-from typing import List, Any
-
-# TODO: Add BibTeX citation
-# Find for instance the citation on arxiv or on the dataset repo/website
-_CITATION = """\
-author: amardeep
-"""
-
-# TODO: Add description of the dataset here
-# You can copy an official description
-_DESCRIPTION = """\
-This new dataset is designed to solve the kp (keyphrase) NLP task and is crafted with a lot of care.
-"""
-
-# TODO: Add a link to an official homepage for the dataset here
-_HOMEPAGE = ""
-
-# TODO: Add the licence for the dataset here if you can find it
-_LICENSE = ""
-
-# TODO: Add link to the official dataset URLs here
-# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
-# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
-_URLS = {
-    "test": "test.jsonl",
-    "train": "train.jsonl",
-    "valid": "valid.jsonl"
-}
-
-
-# TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
-class TestLDKP(datasets.GeneratorBasedBuilder):
-    """TODO: Short description of my dataset."""
-
-    VERSION = datasets.Version("1.1.0")
-
-    # This is an example of a dataset with multiple configurations.
-    # If you don't want/need to define several sub-sets in your dataset,
-    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
-
-    # If you need to make complex sub-parts in the datasets with configurable options
-    # you can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig
-    # BUILDER_CONFIG_CLASS = MyBuilderConfig
-
-    # You will be able to load one or the other configuration in the following list with
-    # data = datasets.load_dataset('my_dataset', 'first_domain')
-    # data = datasets.load_dataset('my_dataset', 'second_domain')
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="ldkp", version=VERSION, description="This part of my dataset covers the long document"),
-        datasets.BuilderConfig(name="normal", version=VERSION, description="This part of my dataset covers the abstract only"),
-    ]
-
-    DEFAULT_CONFIG_NAME = "normal"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
-
-    def _info(self):
-        # TODO: This method specifies the datasets.DatasetInfo object which contains information and typings for the dataset
-        if self.config.name == "ldkp":  # This is the name of the configuration selected in BUILDER_CONFIGS above
-            features = datasets.Features(
-                {
-                    "text": list,
-                    "BIO_tags": list
-                    # "text": datasets.Value("string"),
-                    # "BIO_tags": datasets.Value("string")
-                    # "answer": datasets.Value("string")
-                    # These are the features of your dataset like images, labels ...
-                }
-            )
-        else:  # This is an example to show how to have different features for "first_domain" and "second_domain"
-            features = datasets.Features(
-                {
-                    "text": datasets.Value("string"),
-                    "BIO_tags": datasets.Value("string")
-                    # "second_domain_answer": datasets.Value("string")
-                    # These are the features of your dataset like images, labels ...
-                }
-            )
-        return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
-            description=_DESCRIPTION,
-            # This defines the different columns of the dataset and their types
-            features=features,  # Here we define them above because they are different between the two configurations
-            # If there's a common (input, target) tuple from the features, uncomment the supervised_keys line below and
-            # specify them. They'll be used if as_supervised=True in builder.as_dataset.
-            # supervised_keys=("sentence", "label"),
-            # Homepage of the dataset for documentation
-            homepage=_HOMEPAGE,
-            # License for the dataset if available
-            license=_LICENSE,
-            # Citation for the dataset
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
-        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
-
-        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
-        # It can accept any type or nested list/dict and will give back the same structure with the URLs replaced with paths to local files.
-        # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-        # urls = _URLS[self.config.name]
-        data_dir = dl_manager.download_and_extract(_URLS)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": data_dir["train"],
-                    "split": "train",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": data_dir["test"],
-                    "split": "test",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": data_dir["valid"],
-                    "split": "valid",
-                },
-            ),
-        ]
-
-    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-    def _generate_examples(self, filepath, split):
-        # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
-        # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
-        with open(filepath, encoding="utf-8") as f:
-            for key, row in enumerate(f):
-                data = json.loads(row)
-                if self.config.name == "ldkp":
-                    # Yields examples as (key, example) tuples
-                    yield key, {
-                        "text": data["abstract"] + data["other_sec"],
-                        "BIO_tags": data["abstract_tags"] + data["other_sec_tags"],
-                        # "answer": "" if split == "test" else data["answer"],
-                    }
-                else:
-                    yield key, {
-                        "text": data["abstract"],
-                        "BIO_tags": data["abstract_tags"],
-                        # "second_domain_answer": "" if split == "test" else data["second_domain_answer"],
-                    }
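
Note on the removed schema: in the "ldkp" branch of `_info`, the features are declared with the bare Python type `list`, which `datasets.Features` does not accept; the commented-out alternatives suggest the two fields were meant to hold token and tag sequences. A minimal sketch of a valid schema, assuming both columns are lists of strings, would be:

import datasets

# Hypothetical corrected schema for the "ldkp" config: variable-length
# string sequences are declared with datasets.Sequence, not the bare
# Python type `list`.
ldkp_features = datasets.Features(
    {
        "text": datasets.Sequence(datasets.Value("string")),
        "BIO_tags": datasets.Sequence(datasets.Value("string")),
    }
)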
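
For reference, `_generate_examples` parses each JSONL line with `json.loads` and, in the "ldkp" config, concatenates `abstract` with `other_sec` (and the matching tag fields) using `+`, so every record needs at least those four keys, with list values if the concatenation is to produce token sequences. An illustrative record, with key names taken from the script and values invented:

import json

# Key names come from _generate_examples; the values are invented
# purely for illustration.
record = {
    "abstract": ["Keyphrase", "extraction", "matters"],
    "abstract_tags": ["B", "I", "O"],
    "other_sec": ["Body", "sections", "follow"],
    "other_sec_tags": ["O", "O", "O"],
}
print(json.dumps(record))  # one such object per line in train/valid/test .jsonl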
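
Had the script remained in the repo, either configuration could be loaded the way the template comments describe. A sketch, assuming the hypothetical hub id `rajivratn/test_ldkp`:

from datasets import load_dataset

# "ldkp" = abstract plus other sections; "normal" (the default) = abstract only.
# The repo id is an assumption, and recent versions of the datasets library
# require trust_remote_code=True to execute a loading script like this one.
data = load_dataset("rajivratn/test_ldkp", "ldkp", trust_remote_code=True)
print(data["train"][0]["text"][:10])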