|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""TODO: Add a description here.""" |
|
|
|
|
|
import re |
|
import gzip |
|
import json |
|
import datasets |
|
from pathlib import Path |
|
|
|
|
|
|
|
|
|
# BibTeX citation for the dataset (none published yet).
_CITATION = ""

_DESCRIPTION = """\
French Wikipedia dataset for Entity Linking
"""

_HOMEPAGE = "https://github.com/GaaH/frwiki_el"

_LICENSE = "WTFPL"

# Relative path (within the dataset repository) of each config's data file.
_URLs = {
    "frwiki": "data/corpus.jsonl.gz",
    "frwiki-mini": "data/corpus_mini.jsonl.gz",
    "frwiki-abstracts": "data/corpus_abstracts.jsonl.gz",
    "entities": "data/entities.jsonl.gz",
}

# IOB-style tags used for the "ner" feature (Begin / Inside / Outside).
_NER_CLASS_LABELS = [
    "B",
    "I",
    "O",
]

# Coarse entity types. NOTE(review): not referenced anywhere in this
# script — presumably for downstream consumers; confirm before removing.
_ENTITY_TYPES = [
    "DATE",
    "PERSON",
    "GEOLOC",
    "ORG",
    "OTHER",
]
|
|
|
|
|
# Mentions are annotated inline as [E=Page_title]surface form[/E].
_ENTITY_PATTERN = re.compile(r"\[E=(.+?)\](.+?)\[/E\]")


def item_to_el_features(item, title2qid):
    """Convert a raw corpus item into Entity Linking features.

    Parses ``item['text']``, where entity mentions are marked as
    ``[E=Page_title]surface form[/E]``, into three parallel lists:
    whitespace-tokenized words, IOB NER tags ("B"/"I"/"O"), and
    entity-linking labels (the mention's Wikidata QID, or None for
    words outside any mention).

    Args:
        item: dict with keys 'name', 'wikidata_id', 'wikipedia_id',
            'wikidata_url', 'wikipedia_url' and 'text'.
        title2qid: mapping from page title (with spaces, not
            underscores) to Wikidata QID; unknown titles map to the
            string "unknown".

    Returns:
        dict with the item's metadata (page title under "title") plus
        equal-length "words", "ner" and "el" lists.
    """
    res = {
        "title": item['name'].replace("_", " "),
        "wikidata_id": item['wikidata_id'],
        "wikipedia_id": item['wikipedia_id'],
        "wikidata_url": item['wikidata_url'],
        "wikipedia_url": item['wikipedia_url'],
    }
    words, ner, el = [], [], []

    text = item['text']
    pos = 0
    for m in _ENTITY_PATTERN.finditer(text):
        mention_title = m.group(1)
        mention = m.group(2)

        # Annotation titles use underscores; title2qid keys use spaces.
        mention_qid = title2qid.get(mention_title.replace("_", " "), "unknown")

        # Plain text between the previous match and this one: all "O".
        prev_words = text[pos:m.start()].split()
        words.extend(prev_words)
        ner.extend(["O"] * len(prev_words))
        el.extend([None] * len(prev_words))

        # The mention itself: "B" on the first word, "I" on the rest.
        # Guard against whitespace-only surface forms, which previously
        # desynchronized the parallel lists (ner/el grew while words
        # did not).
        mention_words = mention.split()
        if mention_words:
            words.extend(mention_words)
            ner.extend(["B"] + ["I"] * (len(mention_words) - 1))
            el.extend([mention_qid] * len(mention_words))

        pos = m.end()

    # Trailing text after the last mention.
    tail = text[pos:].split()
    words.extend(tail)
    ner.extend(["O"] * len(tail))
    el.extend([None] * len(tail))

    res.update({"words": words, "ner": ner, "el": el})
    return res
|
|
|
|
|
class FrwikiElDataset(datasets.GeneratorBasedBuilder):
    """French Wikipedia dataset for Entity Linking.

    Each config reads one gzipped JSON-lines file: the "frwiki*"
    configs yield annotated pages (words / ner / el), while the
    "entities" config yields entities and their descriptions.
    """

    VERSION = datasets.Version("0.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="frwiki", version=VERSION,
                               description="The frwiki dataset for Entity Linking"),
        datasets.BuilderConfig(name="frwiki-mini", version=VERSION,
                               description="1000 first sentences of the frwiki dataset for Entity Linking"),
        datasets.BuilderConfig(name="frwiki-abstracts", version=VERSION,
                               description="Abstracts (first paragraph) of the frwiki pages."),
        datasets.BuilderConfig(name="entities", version=VERSION,
                               description="Entities and their descriptions"),
    ]

    DEFAULT_CONFIG_NAME = "frwiki"

    def _info(self):
        """Return the DatasetInfo; the feature schema depends on the config."""
        if self.config.name in ("frwiki", "frwiki-mini", "frwiki-abstracts"):
            features = datasets.Features({
                "name": datasets.Value("string"),
                "wikidata_id": datasets.Value("string"),
                "wikipedia_id": datasets.Value("string"),
                "wikipedia_url": datasets.Value("string"),
                "wikidata_url": datasets.Value("string"),
                "words": [datasets.Value("string")],
                "ner": [datasets.ClassLabel(names=_NER_CLASS_LABELS)],
                "el": [datasets.Value("string")],
            })
        elif self.config.name == "entities":
            features = datasets.Features({
                "name": datasets.Value("string"),
                "wikidata_id": datasets.Value("string"),
                "wikipedia_id": datasets.Value("string"),
                "wikipedia_url": datasets.Value("string"),
                "wikidata_url": datasets.Value("string"),
                "description": datasets.Value("string"),
            })
        else:
            # Fail fast with a clear message instead of the
            # UnboundLocalError on `features` that would follow.
            raise ValueError(f"Unknown config name: {self.config.name!r}")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download this config's data file and expose it as one TRAIN split."""
        path = dl_manager.download(_URLs[self.config.name])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "path": path,
                },
            )
        ]

    def _generate_examples(self, path):
        """Yield (key, example) tuples from the gzipped JSON-lines file."""

        def _identity(x):
            # Keep numeric JSON values as their raw string form so they
            # match the string-typed features declared in _info().
            return x

        # gzip.open accepts the path directly; no need to pre-open the
        # file in binary mode.
        with gzip.open(path, "rt", encoding="UTF-8") as datafile:
            # `key` instead of `id` to avoid shadowing the builtin.
            for key, line in enumerate(datafile):
                item = json.loads(
                    line,
                    parse_int=_identity,
                    parse_float=_identity,
                    parse_constant=_identity,
                )
                yield key, item