import bz2
import codecs
import json
import re
import string
import traceback
import uuid
import xml.etree.ElementTree as etree  # cElementTree was removed in Python 3.9
from multiprocessing import Process, Manager
from urllib.parse import quote

import datasets
import mwparserfromhell
from tqdm import tqdm

_BASE_URL_TMPL = "https://dumps.wikimedia.org/{lang}wiki/{date}/"
_CITATION = """\
@ONLINE {wikidump,
    author = {Wikimedia Foundation},
    title = {Wikimedia Downloads},
    url = {https://dumps.wikimedia.org}
}
"""
_DATE = "20230301"
_DESCRIPTION = None
_INFO_FILE = "dumpstatus.json"
_LICENSE = (
    "This work is licensed under the Creative Commons Attribution-ShareAlike "
    "3.0 Unported License. To view a copy of this license, visit "
    "http://creativecommons.org/licenses/by-sa/3.0/ or send a letter to "
    "Creative Commons, PO Box 1866, Mountain View, CA 94042, USA."
)
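
# Illustration (not executed): with the defaults above, the dump-status URL fetched
# in _split_generators for English resolves to
#   https://dumps.wikimedia.org/enwiki/20230301/dumpstatus.json
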
_WIKIPEDIA_LANGUAGES = [
    "aa",
    "ab",
    "ace",
    "ady",
    "af",
    "ak",
    "als",
    "alt",
    "am",
    "ami",
    "an",
    "ang",
    "ar",
    "arc",
    "ary",
    "arz",
    "as",
    "ast",
    "atj",
    "av",
    "avk",
    "awa",
    "ay",
    "az",
    "azb",
    "ba",
    "ban",
    "bar",
    "bat-smg",
    "bcl",
    "be",
    "be-tarask",
    "bg",
    "bh",
    "bi",
    "bjn",
    "blk",
    "bm",
    "bn",
    "bo",
    "bpy",
    "br",
    "bs",
    "bug",
    "bxr",
    "ca",
    "cbk-zam",
    "cdo",
    "ce",
    "ceb",
    "ch",
    "cho",
    "chr",
    "chy",
    "ckb",
    "co",
    "cr",
    "crh",
    "cs",
    "csb",
    "cu",
    "cv",
    "cy",
    "da",
    "dag",
    "de",
    "din",
    "diq",
    "dsb",
    "dty",
    "dv",
    "dz",
    "ee",
    "el",
    "eml",
    "en",
    "eo",
    "es",
    "et",
    "eu",
    "ext",
    "fa",
    "ff",
    "fi",
    "fiu-vro",
    "fj",
    "fo",
    "fr",
    "frp",
    "frr",
    "fur",
    "fy",
    "ga",
    "gag",
    "gan",
    "gcr",
    "gd",
    "gl",
    "glk",
    "gn",
    "gom",
    "gor",
    "got",
    "gu",
    "guc",
    "gur",
    "guw",
    "gv",
    "ha",
    "hak",
    "haw",
    "he",
    "hi",
    "hif",
    "ho",
    "hr",
    "hsb",
    "ht",
    "hu",
    "hy",
    "hyw",
    "hz",
    "ia",
    "id",
    "ie",
    "ig",
    "ii",
    "ik",
    "ilo",
    "inh",
    "io",
    "is",
    "it",
    "iu",
    "ja",
    "jam",
    "jbo",
    "jv",
    "ka",
    "kaa",
    "kab",
    "kbd",
    "kbp",
    "kcg",
    "kg",
    "ki",
    "kj",
    "kk",
    "kl",
    "km",
    "kn",
    "ko",
    "koi",
    "kr",
    "krc",
    "ks",
    "ksh",
    "ku",
    "kv",
    "kw",
    "ky",
    "la",
    "lad",
    "lb",
    "lbe",
    "lez",
    "lfn",
    "lg",
    "li",
    "lij",
    "lld",
    "lmo",
    "ln",
    "lo",
    "lrc",
    "lt",
    "ltg",
    "lv",
    "mad",
    "mai",
    "map-bms",
    "mdf",
    "mg",
    "mh",
    "mhr",
    "mi",
    "min",
    "mk",
    "ml",
    "mn",
    "mni",
    "mnw",
    "mr",
    "mrj",
    "ms",
    "mt",
    "mus",
    "mwl",
    "my",
    "myv",
    "mzn",
    "na",
    "nah",
    "nap",
    "nds",
    "nds-nl",
    "ne",
    "new",
    "ng",
    "nia",
    "nl",
    "nn",
    "no",
    "nov",
    "nqo",
    "nrm",
    "nso",
    "nv",
    "ny",
    "oc",
    "olo",
    "om",
    "or",
    "os",
    "pa",
    "pag",
    "pam",
    "pap",
    "pcd",
    "pcm",
    "pdc",
    "pfl",
    "pi",
    "pih",
    "pl",
    "pms",
    "pnb",
    "pnt",
    "ps",
    "pt",
    "pwn",
    "qu",
    "rm",
    "rmy",
    "rn",
    "ro",
    "roa-rup",
    "roa-tara",
    "ru",
    "rue",
    "rw",
    "sa",
    "sah",
    "sat",
    "sc",
    "scn",
    "sco",
    "sd",
    "se",
    "sg",
    "sh",
    "shi",
    "shn",
    "si",
    "simple",
    "sk",
    "skr",
    "sl",
    "sm",
    "smn",
    "sn",
    "so",
    "sq",
    "sr",
    "srn",
    "ss",
    "st",
    "stq",
    "su",
    "sv",
    "sw",
    "szl",
    "szy",
    "ta",
    "tay",
    "tcy",
    "te",
    "tet",
    "tg",
    "th",
    "ti",
    "tk",
    "tl",
    "tn",
    "to",
    "tpi",
    "tr",
    "trv",
    "ts",
    "tt",
    "tum",
    "tw",
    "ty",
    "tyv",
    "udm",
    "ug",
    "uk",
    "ur",
    "uz",
    "ve",
    "vec",
    "vep",
    "vi",
    "vls",
    "vo",
    "wa",
    "war",
    "wo",
    "wuu",
    "xal",
    "xh",
    "xmf",
    "yi",
    "yo",
    "za",
    "zea",
    "zh",
    "zh-classical",
    "zh-min-nan",
    "zh-yue",
    "zu",
]

_VERSION = datasets.Version("2.0.0", "")

core_params = {
    "title",
    "url",
    "accessdate",
    "date",
    "publisher",
    "archivedate",
    "archiveurl",
    "website",
    "work",
    "pages",
    "isbn",
    "page",
    "journal",
    "volume",
    "location",
    "doi",
    "issue",
    "newspaper",
}
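
# Illustration of the kind of wikitext this script targets (a made-up example,
# not taken from a real article):
#
#   <ref>{{cite web |url=https://example.org/report.pdf |title=Example report
#         |publisher=Example Press |accessdate=2023-03-01}}</ref>
#
# Only parameter names listed in `core_params` are collected by _parse_obj below.

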
class WikipediaCitationsConfig(datasets.BuilderConfig):
    """BuilderConfig for Wikipedia Citations."""

    def __init__(self, language=None, date=None, version=_VERSION, **kwargs):
        """BuilderConfig for Wikipedia Citations.

        Args:
            language: string, the language code for the Wikipedia dump to use.
            date: string, date of the Wikipedia dump in YYYYMMDD format. A list of
                available dates can be found at https://dumps.wikimedia.org/enwiki/.
            version: datasets.Version, version of the dataset config.
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(
            name=f"{date}.{language}",
            description=f"Wikipedia Citations dataset for {language}, parsed from {date} dump.",
            version=version,
            **kwargs,
        )
        self.date = date
        self.language = language
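
# Usage sketch (illustrative; the script filename "wikipedia_citations.py" is an
# assumption about how this file is saved locally, not something defined here):
#
#   import datasets
#   ds = datasets.load_dataset("wikipedia_citations.py", "20230301.en", split="train")
#
# Config names follow the "<date>.<language>" pattern built above.

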
class WikipediaCitations(datasets.GeneratorBasedBuilder):

    BUILDER_CONFIG_CLASS = WikipediaCitationsConfig
    BUILDER_CONFIGS = [
        WikipediaCitationsConfig(
            language=lang,
            date=_DATE,
        )
        for lang in _WIKIPEDIA_LANGUAGES
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "wiki_id": datasets.Value("string"),
                    "wiki_url": datasets.Value("string"),
                    "wiki_title": datasets.Value("string"),
                    "citation_type": datasets.Value("string"),
                    "template": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "url": datasets.Value("string"),
                    "domain": datasets.Value("string"),
                    "archiveurl": datasets.Value("string"),
                    "format": datasets.Value("string"),
                    "publisher": datasets.Value("string"),
                    "work": datasets.Value("string"),
                    "isbn": datasets.Value("string"),
                    "journal": datasets.Value("string"),
                    "volume": datasets.Value("string"),
                    "doi": datasets.Value("string"),
                    "issue": datasets.Value("string"),
                    "newspaper": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage="https://dumps.wikimedia.org",
            citation=_CITATION,
        )

    def _construct_url(self, title, language):
        return f"https://{language}.wikipedia.org/wiki/{quote(title)}"
    def _split_generators(self, dl_manager):
        def _base_url(lang):
            return _BASE_URL_TMPL.format(
                lang=lang.replace("-", "_"), date=self.config.date
            )

        lang = self.config.language

        info_url = _base_url(lang) + _INFO_FILE
        downloaded_files = dl_manager.download_and_extract({"info": info_url})

        xml_urls = []
        total_bytes = 0
        with open(downloaded_files["info"], encoding="utf-8") as f:
            dump_info = json.load(f)
            multistream_dump_info = dump_info["jobs"]["articlesmultistreamdump"]
            assert (
                multistream_dump_info["status"] == "done"
            ), "Specified dump (%s) multistream status is not 'done': %s" % (
                _base_url(lang),
                multistream_dump_info["status"],
            )

            for fname, info in multistream_dump_info["files"].items():
                if ".xml" not in fname:
                    continue
                total_bytes += info["size"]
                xml_urls.append(_base_url(lang) + fname)

        print("Downloading Wikipedia dump")
        downloaded_files = dl_manager.download({"xml": xml_urls})
        print("Finished downloading Wikipedia dump")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepaths": downloaded_files["xml"], "language": lang},
            )
        ]
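
    # The dump-status JSON read above has roughly this shape (the file name and
    # size are made up for illustration):
    #
    #   {"jobs": {"articlesmultistreamdump": {"status": "done", "files": {
    #       "enwiki-20230301-pages-articles-multistream1.xml-p1p41242.bz2": {"size": 123456789},
    #       ...}}}}
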
    def _extract_content(self, filepath):
        """Extracts article content from a single WikiMedia XML file."""
        print("generating examples from {}".format(filepath))
        content = []
        f = bz2.BZ2File(filename=filepath)
        utf_f = codecs.getreader("utf-8")(f)
        context = etree.iterparse(utf_f, events=("end",))
        for unused_event, elem in context:
            if not elem.tag.endswith("page"):
                continue
            # The tag is "{xml-namespace}page"; strip the trailing "page" to
            # recover the namespace prefix used for the child elements.
            namespace = elem.tag[:-4]
            title = elem.find(f"./{namespace}title").text
            ns = elem.find(f"./{namespace}ns").text
            id_ = elem.find(f"./{namespace}id").text
            red_ = elem.find(f"./{namespace}redirect")

            # Keep only main-namespace articles.
            if ns != "0":
                elem.clear()
                continue

            raw_content = elem.find(f"./{namespace}revision/{namespace}text").text
            elem.clear()

            # Skip empty pages and redirects.
            if raw_content is None or red_ is not None:
                continue

            content.append((id_, title, raw_content))
        return content

    def _is_ref_tag(self, obj):
        return str(obj.tag) in {"ref"}

    def _normalize_role(self, text, role):
        role_regex = re.compile(r"{}[0-9]+".format(role))
        if re.fullmatch(role_regex, text) is not None:
            text = role
        return text

    def _normalize_obj(self, obj):
        text = str(obj).strip().lower()

        for role in ["first", "last", "author", "editor"]:
            text = self._normalize_role(text, role)

        return text.translate(str.maketrans("", "", string.punctuation))
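
    # e.g. _normalize_obj("First1") -> "first"  (numbered roles are collapsed)
    #      _normalize_obj(" Archive-URL ") -> "archiveurl"  (punctuation stripped)
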
    def _get_domain(self, url):
        if url is None:
            return None
        url = url.strip().lower()
        # Strip web-archive wrappers by removing a prefix the same length as a
        # sample archived URL; this relies on the timestamp portion being fixed-width.
        while url.startswith("https://web.archive.org/"):
            url = url[len("https://web.archive.org/web/20140109071239/"):]
        while url.startswith("http://web.archive.org/"):
            url = url[len("http://web.archive.org/web/20121023020317/"):]
        while url.startswith("https://archive.today/"):
            url = url[len("https://archive.today/20120728203512/"):]
        while url.startswith("http://archive.today/"):
            url = url[len("http://archive.today/2022.01.15-193252/"):]

        tokens = url.split("/")
        if len(tokens) < 3:
            return None
        return tokens[2]
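
    # e.g. _get_domain("https://web.archive.org/web/20140109071239/https://example.org/a")
    #   -> "example.org"
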
    def _get_format(self, url):
        if url is None:
            return None
        url = url.strip().lower()

        prefix = url.split("?")[0]
        suffix = prefix.split("/")[-1]
        f = suffix.split(".")[-1] if "." in suffix else None
        return f
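
    # e.g. _get_format("https://example.org/docs/report.pdf?dl=1") -> "pdf"
    #      _get_format("https://example.org/docs/") -> None
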
    def _parse_obj(self, obj, language):
        """Cleans raw wikicode to extract citations."""
        refs = []
        id_, title, raw_content = obj
        url = self._construct_url(title, language)
        wikicode = mwparserfromhell.parse(raw_content, skip_style_tags=True)

        for i, refobj in enumerate(
            wikicode.ifilter_tags(matches=self._is_ref_tag, recursive=True)
        ):
            try:
                templates = mwparserfromhell.parse(refobj).filter_templates()
                if templates is None or len(templates) == 0:
                    continue
                for template in templates:
                    params = {}
                    for param in template.params:
                        # Each parameter renders as "name=value"; split on the
                        # first "=" and keep only the whitelisted names.
                        split_idx = param.find("=")
                        key = self._normalize_obj(param[:split_idx])
                        val = param[split_idx + 1 :].strip()
                        if key in core_params:
                            params[key] = val

                    refs.append(
                        {
                            "id": str(uuid.uuid4()),
                            "wiki_id": id_,
                            "wiki_url": url,
                            "wiki_title": title,
                            "citation_type": str(template.name.strip().lower()),
                            "template": str(template),
                            "title": params.get("title"),
                            "url": params.get("url"),
                            "archiveurl": params.get("archiveurl"),
                            "domain": self._get_domain(params.get("url")),
                            "format": self._get_format(params.get("url")),
                            "publisher": params.get("publisher"),
                            "work": params.get("work"),
                            "isbn": params.get("isbn"),
                            "journal": params.get("journal"),
                            "volume": params.get("volume"),
                            "doi": params.get("doi"),
                            "issue": params.get("issue"),
                            "newspaper": params.get("newspaper"),
                        }
                    )
            except Exception:
                print(traceback.format_exc())

        return refs
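
    # A single emitted record looks roughly like this (values abbreviated and
    # made up, not real data):
    #   {"id": "<uuid4>", "wiki_id": "12345", "wiki_url": "https://en.wikipedia.org/wiki/...",
    #    "citation_type": "cite journal", "doi": "10.1234/example", "journal": "...", ...}
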
    def _generate_examples(self, filepaths, language):
        print("Parsing and cleaning Wikipedia examples")
        with Manager() as manager:
            examples = manager.list()
            processes = []
            for filepath in filepaths:

                def parse_and_clean(examples, filepath=filepath):
                    # Bind filepath per iteration so each worker parses its own file.
                    content = self._extract_content(filepath)
                    for obj in tqdm(content):
                        refs = self._parse_obj(obj, language=language)
                        if refs is not None and len(refs) > 0:
                            examples.extend(refs)

                p = Process(target=parse_and_clean, args=(examples,))
                p.start()
                processes.append(p)

            for p in processes:
                p.join()

            print("Parsed and cleaned Wikipedia examples")

            for example in examples:
                yield example["id"], example
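

if __name__ == "__main__":
    # Minimal, self-contained sketch of the citation-extraction step used by
    # _parse_obj, run on a hand-written snippet. Illustrative only: it does not
    # build the dataset, and the sample wikitext below is an assumption, not
    # content taken from a real dump.
    sample = (
        "Some article text.<ref>{{cite web |title=Example page "
        "|url=https://example.org/report.pdf |publisher=Example Press}}</ref>"
    )
    code = mwparserfromhell.parse(sample, skip_style_tags=True)
    for ref in code.ifilter_tags(matches=lambda node: str(node.tag) == "ref"):
        for template in mwparserfromhell.parse(ref).filter_templates():
            extracted = {
                str(p.name).strip().lower(): str(p.value).strip()
                for p in template.params
            }
            print(str(template.name).strip(), extracted)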