parquet-converter committed
Commit 0c97286 · 1 parent: 3cf237e

Update parquet files
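With this change the train split is stored as a single parquet shard at default/ler-train.parquet, so it can be read with any parquet reader. A minimal sketch with pandas (an assumption for illustration; it requires pyarrow or fastparquet installed, and an LFS checkout so the path holds the real parquet bytes rather than the pointer text):

import pandas as pd

# Read the converted train split added in this commit.
df = pd.read_parquet("default/ler-train.parquet")

print(df.columns.tolist())  # per the deleted dataset_infos.json: ["id", "tokens", "ner_tags"]
print(len(df))              # 66723 examples recorded for the train split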
.gitattributes CHANGED
@@ -14,3 +14,4 @@
  *.pb filter=lfs diff=lfs merge=lfs -text
  *.pt filter=lfs diff=lfs merge=lfs -text
  *.pth filter=lfs diff=lfs merge=lfs -text
+ default/ler-train.parquet filter=lfs diff=lfs merge=lfs -text
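The added line routes the new parquet shard through Git LFS, alongside the model-weight patterns already tracked. As a rough illustration only (gitattributes uses gitignore-style globbing, which fnmatch merely approximates), one could check which paths fall under these rules:

from fnmatch import fnmatch

# Patterns from the .gitattributes hunk above; fnmatch is a rough
# stand-in for gitattributes glob semantics, good enough here.
lfs_patterns = ["*.pb", "*.pt", "*.pth", "default/ler-train.parquet"]

def is_lfs_tracked(path: str) -> bool:
    return any(fnmatch(path, pat) for pat in lfs_patterns)

print(is_lfs_tracked("default/ler-train.parquet"))  # True
print(is_lfs_tracked("ler.py"))                     # False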
dataset_infos.json DELETED
@@ -1 +0,0 @@
- {"default": {"description": "We describe a dataset developed for Named Entity Recognition in German federal court decisions. \nIt consists of approx. 67,000 sentences with over 2 million tokens. \nThe resource contains 54,000 manually annotated entities, mapped to 19 fine-grained semantic classes: \nperson, judge, lawyer, country, city, street, landscape, organization, company, institution, court, brand, law, \nordinance, European legal norm, regulation, contract, court decision, and legal literature. \nThe legal documents were, furthermore, automatically annotated with more than 35,000 TimeML-based time expressions. \nThe dataset, which is available under a CC-BY 4.0 license in the CoNNL-2002 format, \nwas developed for training an NER service for German legal documents in the EU project Lynx.\n", "citation": "@inproceedings{leitner2019fine,\n author = {Elena Leitner and Georg Rehm and Julian Moreno-Schneider},\n title = {{Fine-grained Named Entity Recognition in Legal Documents}},\n booktitle = {Semantic Systems. The Power of AI and Knowledge\n Graphs. Proceedings of the 15th International Conference\n (SEMANTiCS 2019)},\n year = 2019,\n editor = {Maribel Acosta and Philippe Cudr\u00e9-Mauroux and Maria\n Maleshkova and Tassilo Pellegrini and Harald Sack and York\n Sure-Vetter},\n keywords = {aip},\n publisher = {Springer},\n series = {Lecture Notes in Computer Science},\n number = {11702},\n address = {Karlsruhe, Germany},\n month = 9,\n note = {10/11 September 2019},\n pages = {272--287},\n pdf = {https://link.springer.com/content/pdf/10.1007%2F978-3-030-33220-4_20.pdf}\n}\n", "homepage": "https://github.com/elenanereiss/Legal-Entity-Recognition", "license": "", "features": {"id": {"dtype": "int32", "id": null, "_type": "Value"}, "tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 39, "names": ["O", "B-PER", "I-PER", "B-RR", "I-RR", "B-AN", "I-AN", "B-LD", "I-LD", "B-ST", "I-ST", "B-STR", "I-STR", "B-LDS", "I-LDS", "B-ORG", "I-ORG", "B-UN", "I-UN", "B-INN", "I-INN", "B-GRT", "I-GRT", "B-MRK", "I-MRK", "B-GS", "I-GS", "B-VO", "I-VO", "B-EUN", "I-EUN", "B-VS", "I-VS", "B-VT", "I-VT", "B-RS", "I-RS", "B-LIT", "I-LIT"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": {"input": "tokens", "output": "ner_tags"}, "builder_name": "ler", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 38531395, "num_examples": 66723, "dataset_name": "ler"}}, "download_checksums": {"https://raw.githubusercontent.com/elenanereiss/Legal-Entity-Recognition/master/data/ler.conll": {"num_bytes": 19692859, "checksum": "b05bf29720519d3d4a871677189035390607140887e871e30e8abc68ed01581f"}}, "download_size": 19692859, "post_processing_size": null, "dataset_size": 38531395, "size_in_bytes": 58224254}}
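The deleted metadata pins down the label space: 39 BIO tags covering the 19 entity classes named in the description, plus O. A minimal sketch of rebuilding that schema with the datasets library to decode integer tags back to strings:

from datasets import ClassLabel

# Label names copied from the deleted dataset_infos.json.
ner_labels = ClassLabel(
    names=[
        "O",
        "B-PER", "I-PER", "B-RR", "I-RR", "B-AN", "I-AN",
        "B-LD", "I-LD", "B-ST", "I-ST", "B-STR", "I-STR",
        "B-LDS", "I-LDS", "B-ORG", "I-ORG", "B-UN", "I-UN",
        "B-INN", "I-INN", "B-GRT", "I-GRT", "B-MRK", "I-MRK",
        "B-GS", "I-GS", "B-VO", "I-VO", "B-EUN", "I-EUN",
        "B-VS", "I-VS", "B-VT", "I-VT", "B-RS", "I-RS",
        "B-LIT", "I-LIT",
    ]
)

print(ner_labels.num_classes)  # 39
print(ner_labels.int2str(21))  # "B-GRT" (court)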
default/ler-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:87c26bf3b30384c897060969f96a324101f1c65082c7b65b3dd2475b1891ffef
+ size 7061354
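What git actually stores for the parquet file is just this three-line LFS pointer; the ~7 MB payload lives in LFS storage, addressed by the sha256 oid. A minimal sketch that parses a pointer of this shape and verifies a downloaded payload against it (parse_lfs_pointer and matches_pointer are illustrative helpers, not part of any library):

import hashlib

def parse_lfs_pointer(text: str) -> dict:
    # Each pointer line is "key value"; keys here are version, oid, size.
    return dict(line.split(" ", 1) for line in text.strip().splitlines())

pointer = parse_lfs_pointer(
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:87c26bf3b30384c897060969f96a324101f1c65082c7b65b3dd2475b1891ffef\n"
    "size 7061354\n"
)

def matches_pointer(payload: bytes, pointer: dict) -> bool:
    # The oid field prefixes the hash algorithm, e.g. "sha256:<hex>".
    algo, expected = pointer["oid"].split(":", 1)
    digest = hashlib.new(algo, payload).hexdigest()
    return digest == expected and len(payload) == int(pointer["size"])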
dummy/.DS_Store DELETED
Binary file (6.15 kB)
 
dummy/1.0.0/.DS_Store DELETED
Binary file (6.15 kB)
 
dummy/1.0.0/dummy_data.zip DELETED
Binary file (469 Bytes)
 
dummy/1.0.0/dummy_data/ler.conll DELETED
@@ -1,8 +0,0 @@
- Prozesskostenhilfe O
- - O
- Entschädigung O
- für O
- überlange O
- Verfahrensdauer O
- - O
- Revisionsverfahren O
ler.py DELETED
@@ -1,166 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """Fine-grained Named Entity Recognition in Legal Documents"""
-
- from __future__ import absolute_import, division, print_function
-
- import datasets
-
- _CITATION = """\
- @inproceedings{leitner2019fine,
-   author = {Elena Leitner and Georg Rehm and Julian Moreno-Schneider},
-   title = {{Fine-grained Named Entity Recognition in Legal Documents}},
-   booktitle = {Semantic Systems. The Power of AI and Knowledge
-                Graphs. Proceedings of the 15th International Conference
-                (SEMANTiCS 2019)},
-   year = 2019,
-   editor = {Maribel Acosta and Philippe Cudré-Mauroux and Maria
-             Maleshkova and Tassilo Pellegrini and Harald Sack and York
-             Sure-Vetter},
-   keywords = {aip},
-   publisher = {Springer},
-   series = {Lecture Notes in Computer Science},
-   number = {11702},
-   address = {Karlsruhe, Germany},
-   month = 9,
-   note = {10/11 September 2019},
-   pages = {272--287},
-   pdf = {https://link.springer.com/content/pdf/10.1007%2F978-3-030-33220-4_20.pdf}
- }
- """
-
- _DESCRIPTION = """\
- We describe a dataset developed for Named Entity Recognition in German federal court decisions.
- It consists of approx. 67,000 sentences with over 2 million tokens.
- The resource contains 54,000 manually annotated entities, mapped to 19 fine-grained semantic classes:
- person, judge, lawyer, country, city, street, landscape, organization, company, institution, court, brand, law,
- ordinance, European legal norm, regulation, contract, court decision, and legal literature.
- The legal documents were, furthermore, automatically annotated with more than 35,000 TimeML-based time expressions.
- The dataset, which is available under a CC-BY 4.0 license in the CoNNL-2002 format,
- was developed for training an NER service for German legal documents in the EU project Lynx.
- """
-
- _URL = "https://raw.githubusercontent.com/elenanereiss/Legal-Entity-Recognition/master/data/ler.conll"
-
-
- class Ler(datasets.GeneratorBasedBuilder):
-     """
-     We describe a dataset developed for Named Entity Recognition in German federal court decisions.
-     It consists of approx. 67,000 sentences with over 2 million tokens.
-     The resource contains 54,000 manually annotated entities, mapped to 19 fine-grained semantic classes:
-     person, judge, lawyer, country, city, street, landscape, organization, company, institution, court, brand, law,
-     ordinance, European legal norm, regulation, contract, court decision, and legal literature.
-     The legal documents were, furthermore, automatically annotated with more than 35,000 TimeML-based time expressions.
-     The dataset, which is available under a CC-BY 4.0 license in the CoNNL-2002 format,
-     was developed for training an NER service for German legal documents in the EU project Lynx.
-     """
-     VERSION = datasets.Version("1.0.0")
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             # This is the description that will appear on the datasets page.
-             description=_DESCRIPTION,
-             # This defines the different columns of the dataset and their types
-             features=datasets.Features(
-                 {
-                     "id": datasets.Value("int32"),
-                     "tokens": datasets.Sequence(datasets.Value("string")),
-                     "ner_tags": datasets.Sequence(
-                         datasets.ClassLabel(
-                             names=[
-                                 "O",
-                                 "B-PER",
-                                 "I-PER",
-                                 "B-RR",
-                                 "I-RR",
-                                 "B-AN",
-                                 "I-AN",
-                                 "B-LD",
-                                 "I-LD",
-                                 "B-ST",
-                                 "I-ST",
-                                 "B-STR",
-                                 "I-STR",
-                                 "B-LDS",
-                                 "I-LDS",
-                                 "B-ORG",
-                                 "I-ORG",
-                                 "B-UN",
-                                 "I-UN",
-                                 "B-INN",
-                                 "I-INN",
-                                 "B-GRT",
-                                 "I-GRT",
-                                 "B-MRK",
-                                 "I-MRK",
-                                 "B-GS",
-                                 "I-GS",
-                                 "B-VO",
-                                 "I-VO",
-                                 "B-EUN",
-                                 "I-EUN",
-                                 "B-VS",
-                                 "I-VS",
-                                 "B-VT",
-                                 "I-VT",
-                                 "B-RS",
-                                 "I-RS",
-                                 "B-LIT",
-                                 "I-LIT",
-                             ]
-                         )
-                     ),
-                 }
-             ),
-             # If there's a common (input, target) tuple from the features,
-             # specify them here. They'll be used if as_supervised=True in
-             # builder.as_dataset.
-             supervised_keys=datasets.info.SupervisedKeysData(input="tokens", output="ner_tags"),
-             # Homepage of the dataset for documentation
-             homepage="https://github.com/elenanereiss/Legal-Entity-Recognition",
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         # dl_manager is a datasets.download.DownloadManager that can be used to
-         # download and extract URLs
-         dl_file = dl_manager.download(_URL)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={"filepath": dl_file},
-             ),
-         ]
-
-     def _generate_examples(self, filepath):
-         """ Yields examples. """
-         with open(filepath, "r", encoding="utf-8") as f:
-             guid = 0
-             tokens = []
-             ner_tags = []
-             for line in f:
-                 if line == "" or line == "\n":
-                     if tokens:
-                         yield guid, {"id": guid, "tokens": tokens, "ner_tags": ner_tags}
-                         guid += 1
-                         tokens = []
-                         ner_tags = []
-                 else:
-                     # conll2002 tokens are space separated
-                     splits = line.split(" ")
-                     tokens.append(splits[0])
-                     ner_tags.append(splits[1].rstrip())
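For reference, the parsing loop at the heart of the deleted script can be exercised standalone. A minimal sketch, run against the eight-token dummy sample deleted above (an assumption: the real ler.conll separates sentences with blank lines, which the tiny sample does not show):

# Standalone version of the CoNLL parsing loop from the deleted ler.py.
sample = """\
Prozesskostenhilfe O
- O
Entschädigung O
für O
überlange O
Verfahrensdauer O
- O
Revisionsverfahren O
"""

def parse_conll(lines):
    tokens, ner_tags = [], []
    for line in lines:
        if line.strip() == "":
            # A blank line ends the current sentence.
            if tokens:
                yield {"tokens": tokens, "ner_tags": ner_tags}
                tokens, ner_tags = [], []
        else:
            # Token and tag are space separated, one pair per line.
            token, tag = line.split(" ", 1)
            tokens.append(token)
            ner_tags.append(tag.rstrip())
    if tokens:  # flush the last sentence if no trailing blank line
        yield {"tokens": tokens, "ner_tags": ner_tags}

for sentence in parse_conll(sample.splitlines()):
    print(sentence)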
 
ler.py.lock DELETED
File without changes