"""Fine-grained Named Entity Recognition in Legal Documents""" |

from __future__ import absolute_import, division, print_function

import datasets

_CITATION = """\
@inproceedings{leitner2019fine,
    author = {Elena Leitner and Georg Rehm and Julian Moreno-Schneider},
    title = {{Fine-grained Named Entity Recognition in Legal Documents}},
    booktitle = {Semantic Systems. The Power of AI and Knowledge
                 Graphs. Proceedings of the 15th International Conference
                 (SEMANTiCS 2019)},
    year = 2019,
    editor = {Maribel Acosta and Philippe Cudré-Mauroux and Maria
              Maleshkova and Tassilo Pellegrini and Harald Sack and York
              Sure-Vetter},
    keywords = {aip},
    publisher = {Springer},
    series = {Lecture Notes in Computer Science},
    number = {11702},
    address = {Karlsruhe, Germany},
    month = 9,
    note = {10/11 September 2019},
    pages = {272--287},
    pdf = {https://link.springer.com/content/pdf/10.1007%2F978-3-030-33220-4_20.pdf}
}
"""

_DESCRIPTION = """\
We describe a dataset developed for Named Entity Recognition in German federal court decisions.
It consists of approx. 67,000 sentences with over 2 million tokens.
The resource contains 54,000 manually annotated entities, mapped to 19 fine-grained semantic classes:
person, judge, lawyer, country, city, street, landscape, organization, company, institution, court, brand, law,
ordinance, European legal norm, regulation, contract, court decision, and legal literature.
The legal documents were, furthermore, automatically annotated with more than 35,000 TimeML-based time expressions.
The dataset, which is available under a CC-BY 4.0 license in the CoNLL-2002 format,
was developed for training an NER service for German legal documents in the EU project Lynx.
"""

_URL = "https://raw.githubusercontent.com/elenanereiss/Legal-Entity-Recognition/master/data/ler.conll"
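
# Usage sketch (not part of the loader): with this script saved locally, the
# dataset can be loaded through the `datasets` library. The path below is a
# placeholder, and recent `datasets` versions may additionally require
# `trust_remote_code=True` for script-based datasets.
#
#     from datasets import load_dataset
#
#     ds = load_dataset("path/to/ler.py", split="train")
#     print(ds[0]["tokens"][:5])
#     # `ner_tags` are stored as `ClassLabel` integer ids; the tag strings can
#     # be recovered via:
#     print(ds.features["ner_tags"].feature.int2str(ds[0]["ner_tags"][:5]))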


class Ler(datasets.GeneratorBasedBuilder):
    """Fine-grained NER in German federal court decisions (LER); see `_DESCRIPTION` above."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("int32"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
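                    # Label abbreviations pair up, in order, with the 19 classes
                    # listed in `_DESCRIPTION` (Leitner et al., 2019): PER person,
                    # RR judge, AN lawyer, LD country, ST city, STR street,
                    # LDS landscape, ORG organization, UN company, INN institution,
                    # GRT court, MRK brand, GS law, VO ordinance, EUN European
                    # legal norm, VS regulation, VT contract, RS court decision,
                    # LIT legal literature.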
"ner_tags": datasets.Sequence( |
|
datasets.ClassLabel( |
|
names=[ |
|
"O", |
|
"B-PER", |
|
"I-PER", |
|
"B-RR", |
|
"I-RR", |
|
"B-AN", |
|
"I-AN", |
|
"B-LD", |
|
"I-LD", |
|
"B-ST", |
|
"I-ST", |
|
"B-STR", |
|
"I-STR", |
|
"B-LDS", |
|
"I-LDS", |
|
"B-ORG", |
|
"I-ORG", |
|
"B-UN", |
|
"I-UN", |
|
"B-INN", |
|
"I-INN", |
|
"B-GRT", |
|
"I-GRT", |
|
"B-MRK", |
|
"I-MRK", |
|
"B-GS", |
|
"I-GS", |
|
"B-VO", |
|
"I-VO", |
|
"B-EUN", |
|
"I-EUN", |
|
"B-VS", |
|
"I-VS", |
|
"B-VT", |
|
"I-VT", |
|
"B-RS", |
|
"I-RS", |
|
"B-LIT", |
|
"I-LIT", |
|
] |
|
) |
|
), |
|
} |
|
), |
            supervised_keys=datasets.info.SupervisedKeysData(input="tokens", output="ner_tags"),
            homepage="https://github.com/elenanereiss/Legal-Entity-Recognition",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # The script downloads a single CoNLL file, so only a train split is
        # exposed here.
        dl_file = dl_manager.download(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": dl_file},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yields examples."""
        with open(filepath, "r", encoding="utf-8") as f:
            guid = 0
            tokens = []
            ner_tags = []
            for line in f:
                if line == "" or line == "\n":
                    # Blank line: end of the current sentence.
                    if tokens:
                        yield guid, {"id": guid, "tokens": tokens, "ner_tags": ner_tags}
                        guid += 1
                        tokens = []
                        ner_tags = []
                else:
                    # Each non-blank line holds one "<token> <tag>" pair,
                    # separated by a single space.
                    splits = line.split(" ")
                    tokens.append(splits[0])
                    ner_tags.append(splits[1].rstrip())
            # Flush the last sentence if the file does not end in a blank line.
            if tokens:
                yield guid, {"id": guid, "tokens": tokens, "ner_tags": ner_tags}
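

# Minimal smoke test, runnable as a plain script (an assumption-laden sketch,
# not part of the HF loader): it fetches the CoNLL file directly and prints the
# first parsed example. Assumes network access and a `datasets` version whose
# builders accept no-argument construction.
if __name__ == "__main__":
    import tempfile
    import urllib.request

    with tempfile.NamedTemporaryFile(suffix=".conll", delete=False) as tmp:
        path = tmp.name
    urllib.request.urlretrieve(_URL, path)
    # `_generate_examples` is an ordinary generator method, so it can be
    # exercised without building the full dataset.
    guid, example = next(Ler()._generate_examples(path))
    print(guid, example["tokens"][:10], example["ner_tags"][:10])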