"""French Wikipedia "good pages" dataset for Entity Linking.

Each example pairs a tokenized Wikipedia article with BIO labels that mark
entity mentions, together with each mention's Wikidata QID, Wikipedia title,
and short Wikipedia/Wikidata descriptions.
"""

import re
from pathlib import Path

import datasets
import pandas as pd


def get_open_method(path):
    """Return an open()-compatible callable suited to the file's extension.

    Gzip- and bzip2-compressed files are opened with the matching module;
    anything else falls back to the built-in open().
    """
    path = Path(path)
    ext = path.suffix

    if ext == ".gz":
        import gzip
        open_func = gzip.open
    elif ext == ".bz2":
        import bz2
        open_func = bz2.open
    else:
        open_func = open
    return open_func


def read_file(path):
    """Read a whole (possibly gzip/bz2-compressed) text file as UTF-8."""
    open_func = get_open_method(path)
    with open_func(path, "rt", encoding="UTF-8") as f:
        return f.read()
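

# read_file() transparently decompresses .gz/.bz2 article files. A minimal
# usage sketch (the file name is hypothetical):
#   text = read_file("data/good-pages/pages/Paris.txt.gz")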


_CITATION = ""

_DESCRIPTION = """\
French Wikipedia dataset for Entity Linking
"""

_HOMEPAGE = ""

_LICENSE = ""

_URLs = {
    "frwiki": "",
}

_CLASS_LABELS = [
    "B",
    "I",
    "O",
]
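

# Annotated article text marks each entity mention inline as
#   [E=Article_title]surface form[/E]
# text_to_el_features() below converts this markup into per-word BIO labels.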


def text_to_el_features(doc_qid, doc_title, text, title2qid, title2wikipedia,
                        title2wikidata):
    """Convert an [E=...]...[/E]-annotated article into per-word EL features."""
    res = {
        "title": doc_title.replace("_", " "),
        "qid": doc_qid,
    }
    text_dict = {
        "words": [],
        "labels": [],
        "qids": [],
        "titles": [],
        "wikipedia": [],
        "wikidata": [],
    }
    entity_pattern = r"\[E=(.+?)\](.+?)\[/E\]"

    i = 0
    for m in re.finditer(entity_pattern, text):
        mention_title = m.group(1)
        mention = m.group(2)

        mention_qid = title2qid.get(mention_title, "")
        mention_wikipedia = title2wikipedia.get(mention_title, "")
        mention_wikidata = title2wikidata.get(mention_title, "")

        # Descriptions may themselves contain entity markup; strip it,
        # keeping only the surface forms.
        mention_wikipedia = re.sub(entity_pattern, r"\2", mention_wikipedia)
        mention_wikidata = re.sub(entity_pattern, r"\2", mention_wikidata)

        mention_words = mention.split()

        # Words between the previous mention and this one are outside ("O").
        j = m.start(0)
        prev_text = text[i:j].split()
        len_prev_text = len(prev_text)
        text_dict["words"].extend(prev_text)
        text_dict["labels"].extend(["O"] * len_prev_text)
        text_dict["qids"].extend([None] * len_prev_text)
        text_dict["titles"].extend([None] * len_prev_text)
        text_dict["wikipedia"].extend([None] * len_prev_text)
        text_dict["wikidata"].extend([None] * len_prev_text)

        text_dict["words"].extend(mention_words)

        if mention_wikipedia == "":
            # No Wikipedia description is known for this title: treat the
            # mention as outside rather than emit an unlinkable entity.
            len_mention = len(mention_words)
            text_dict["labels"].extend(["O"] * len_mention)
            text_dict["qids"].extend([None] * len_mention)
            text_dict["titles"].extend([None] * len_mention)
            text_dict["wikipedia"].extend([None] * len_mention)
            text_dict["wikidata"].extend([None] * len_mention)
        else:
            # The first mention word gets "B" and the metadata; the remaining
            # words get "I" with no metadata.
            len_mention_tail = len(mention_words) - 1
            text_dict["labels"].extend(["B"] + ["I"] * len_mention_tail)
            text_dict["qids"].extend([mention_qid] + [None] * len_mention_tail)
            text_dict["titles"].extend(
                [mention_title] + [None] * len_mention_tail)
            text_dict["wikipedia"].extend(
                [mention_wikipedia] + [None] * len_mention_tail)
            text_dict["wikidata"].extend(
                [mention_wikidata] + [None] * len_mention_tail)

        i = m.end(0)

    # Remaining words after the last mention are outside ("O").
    tail = text[i:].split()
    len_tail = len(tail)
    text_dict["words"].extend(tail)
    text_dict["labels"].extend(["O"] * len_tail)
    text_dict["qids"].extend([None] * len_tail)
    text_dict["titles"].extend([None] * len_tail)
    text_dict["wikipedia"].extend([None] * len_tail)
    text_dict["wikidata"].extend([None] * len_tail)
    res["text"] = text_dict
    return res
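

# Illustrative sketch (hypothetical inputs): given the annotated text
#   "Né à [E=Paris]Paris[/E] en 1821"
# and lookup tables that have a Wikipedia description for "Paris",
# text_to_el_features() produces
#   words  = ["Né", "à", "Paris", "en", "1821"]
#   labels = ["O",  "O", "B",     "O",  "O"]
# with the QID, title and descriptions attached to the "B" word only.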


class FrWikiGoodPagesELDataset(datasets.GeneratorBasedBuilder):
    """French Wikipedia "good pages" dataset for Entity Linking."""

    VERSION = datasets.Version("0.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="frwiki", version=VERSION,
                               description="The frwiki dataset for Entity Linking"),
    ]

    DEFAULT_CONFIG_NAME = "frwiki"

    def _info(self):
        if self.config.name == "frwiki":
            features = datasets.Features({
                "title": datasets.Value("string"),
                "qid": datasets.Value("string"),
                "text": {
                    "words": [datasets.Value("string")],
                    "wikipedia": [datasets.Value("string")],
                    "wikidata": [datasets.Value("string")],
                    "labels": [datasets.ClassLabel(names=_CLASS_LABELS)],
                    "titles": [datasets.Value("string")],
                    "qids": [datasets.Value("string")],
                }
            })

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "dataset_dir": Path(".", "data", "good-pages"),
                    "split": "train"
                }
            )
        ]
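
    # Note: the data is read from a local directory rather than fetched via
    # dl_manager. Layout inferred from _generate_examples below:
    #   data/good-pages/list-good-pages.txt          one "good page" title per line
    #   data/good-pages/scrapped/final-dataset.csv   columns: title, qid, path,
    #       wikipedia_description, wikidata_description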

    def _generate_examples(self, dataset_dir, split):
        """Yields examples as (key, example) tuples."""
        with open(Path(dataset_dir, "list-good-pages.txt"), "rt",
                  encoding="UTF-8") as f:
            good_pages_list = f.read().split("\n")

        wiki_df = pd.read_csv(Path(dataset_dir, "scrapped", "final-dataset.csv"),
                              dtype=str, na_filter=False)

        title2qid = dict(zip(wiki_df["title"], wiki_df["qid"]))
        title2path = dict(zip(wiki_df["title"], wiki_df["path"]))
        title2wikipedia = dict(
            zip(wiki_df["title"], wiki_df["wikipedia_description"]))
        title2wikidata = dict(
            zip(wiki_df["title"], wiki_df["wikidata_description"]))

        # Keep only non-empty titles that have an article file on disk.
        # (Check emptiness first so blank lines cannot raise a KeyError.)
        good_pages_list = [
            gp.strip()
            for gp in good_pages_list
            if gp.strip() != "" and title2path.get(gp.strip(), "") != ""
        ]

        for idx, title in enumerate(good_pages_list):
            qid = title2qid[title]
            path = title2path[title]
            text = read_file(path)

            features = text_to_el_features(
                qid, title, text, title2qid, title2wikipedia, title2wikidata)
            yield idx, features
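

# A minimal usage sketch (assuming the local data layout described above is
# in place; the script path is hypothetical):
#
#   from datasets import load_dataset
#   ds = load_dataset("path/to/frwiki_good_pages_el.py", name="frwiki",
#                     split="train")
#   example = ds[0]
#   print(example["title"], example["text"]["words"][:5])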