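# Hugging Face `datasets` loading script that builds an English-Japanese
# parallel corpus of novel passages from the Utiyama & Takahashi
# translation alignment pages.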
import re
import unicodedata
from pathlib import Path
from typing import List

import datasets as ds
from bs4 import BeautifulSoup

_DESCRIPTION = "Parallel passages from novels."
_CITATION = """
内山将夫,高橋真弓.(2003) 日英対訳文対応付けデータ.
Masao Utiyama and Mayumi Takahashi. (2003) English-Japanese Translation Alignment Data.
""".strip()
_HOMEPAGE = "https://www2.nict.go.jp/astrec-att/member/mutiyama/align/"
_LICENSE = None
_DOWNLOAD_URL = (
    "https://www2.nict.go.jp/astrec-att/member/mutiyama/align/download/align-070215.zip"
)

def preprocess(text: str) -> str:
    # Strip Aozora Bunko-style markup: numbered translator notes, ruby
    # readings in 《...》, editorial annotations in ［＃...］, and
    # parenthesized hiragana readings.
    text = re.sub(r"<注[0-9]+>", "", text.strip())
    text = re.sub(r"《.*?》", "", text)
    text = re.sub(r"［＃.*?］", "", text)
    text = re.sub(r"（[\u3040-\u309F]+）", "", text)
    # Normalize spaced minus signs to full-width dashes, drop emphasis
    # underscores, and tidy stray quotes and leading dashes.
    text = re.sub(r" − (.+) − ", "――\\1――", text)
    text = re.sub(r"_(.+)_", "\\1", text)
    text = re.sub(r" ``$", "''", text.strip())
    text = re.sub(r"^――", "", text.strip())
    text = re.sub(r"^..第", "第", text.strip())
    return text.strip()

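# For example:
#   preprocess("それは<注1>名訳《めいやく》だった。") -> "それは名訳だった。"
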
def parse_html_table(path: Path):
    # The alignment pages ship in a mix of Japanese encodings; try each
    # candidate in turn and skip files that none of them can decode.
    content = None
    for encoding in ("shift_jis", "utf-8", "cp932"):
        try:
            with path.open(encoding=encoding) as f:
                content = f.read()
            break
        except UnicodeDecodeError:
            continue
    if content is None:
        return [], []

    soup = BeautifulSoup(content, "lxml")
    tables = soup.find_all("table")

    texts_en, texts_ja = [], []
    cur_text_en, cur_text_ja = "", ""
    # Running delimiter counts: rows are merged until every parenthesis,
    # TeX-style quote, and Japanese bracket opened so far is closed.
    cur_left_parens, cur_right_parens = 0, 0
    cur_left_quote, cur_right_quote = 0, 0
    cur_left_parens_ja, cur_right_parens_ja = 0, 0
    cur_left_parens_ja2, cur_right_parens_ja2 = 0, 0

    for table in tables:
        for tr in table.find_all("tr"):
            # Each row holds an English cell, a middle cell (unused), and
            # a Japanese cell.
            text_en, _, text_ja = (preprocess(td.text) for td in tr.find_all("td"))
            text_ja = unicodedata.normalize("NFKC", text_ja)

            cur_left_parens += text_en.count("(")
            cur_right_parens += text_en.count(")")
            cur_left_quote += len(re.findall(r"``", text_en))
            cur_right_quote += len(re.findall(r"''", text_en))
            cur_left_parens_ja += text_ja.count("「")
            cur_right_parens_ja += text_ja.count("」")
            cur_left_parens_ja2 += text_ja.count("『")
            cur_right_parens_ja2 += text_ja.count("』")

            # Flush the accumulated pair only at a sentence boundary on
            # both sides with every delimiter balanced.
            if (
                text_ja.strip().endswith("。")
                and text_en.strip().endswith(".")
                and cur_left_parens == cur_right_parens
                and cur_left_quote == cur_right_quote
                and cur_left_parens_ja == cur_right_parens_ja
                and cur_left_parens_ja2 == cur_right_parens_ja2
            ):
                texts_en.append((cur_text_en + " " + text_en).strip())
                texts_ja.append((cur_text_ja + text_ja).strip())
                cur_text_en, cur_text_ja = "", ""
                cur_left_parens, cur_right_parens = 0, 0
                cur_left_quote, cur_right_quote = 0, 0
                cur_left_parens_ja, cur_right_parens_ja = 0, 0
                cur_left_parens_ja2, cur_right_parens_ja2 = 0, 0
            else:
                cur_text_en += " " + text_en
                cur_text_ja += text_ja

    # Emit whatever is still buffered after the final row.
    texts_en.append(cur_text_en.strip())
    texts_ja.append(cur_text_ja.strip())
    return texts_en, texts_ja

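# Usage sketch (the page name below is hypothetical):
#   texts_en, texts_ja = parse_html_table(Path("align/htmPages/some_novel.htm"))
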
class EnJaAlignDataset(ds.GeneratorBasedBuilder):
    VERSION = ds.Version("1.0.0")
    DEFAULT_CONFIG_NAME = "default"

    BUILDER_CONFIGS = [
        ds.BuilderConfig(
            name="default",
            version=VERSION,
            description="",
        ),
    ]

    def _info(self) -> ds.DatasetInfo:
        if self.config.name == "default":
            features = ds.Features(
                {
                    "id": ds.Value("string"),
                    "en": ds.Value("string"),
                    "ja": ds.Value("string"),
                    "source": ds.Value("string"),
                }
            )
        return ds.DatasetInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            features=features,
        )

    def _split_generators(self, dl_manager: ds.DownloadManager):
        # Download the archive and collect every aligned HTML page;
        # everything goes into a single train split.
        data_path = dl_manager.download_and_extract(_DOWNLOAD_URL)
        paths = list(Path(data_path, "align/htmPages").glob("*.htm"))
        return [
            ds.SplitGenerator(
                name=ds.Split.TRAIN,
                gen_kwargs={"paths": paths},
            )
        ]

    def _preprocess_ja(self, text: str) -> str:
        # Drop leading passage numbers and dashes,
        # e.g. "1.2 ――序文" -> "序文".
        text = re.sub(r"\d+\.(\d|\.)*", "", text.strip()).strip()
        text = re.sub(r"^――", "", text).strip()
        return text

    def _preprocess_en(self, text: str) -> str:
        # Drop leading passage numbers, then convert TeX-style quotes,
        # e.g. ``Hello,'' he said. -> "Hello," he said.
        text = re.sub(r"\d+\.(\d|\.)*", "", text.strip()).strip()
        text = re.sub(r"```(.*?)'", r"``\1", text).strip()
        text = re.sub(r"``(.*?)''", r'"\1"', text).strip()
        return text

    def _generate_examples(self, paths: List[Path]):
        for path in paths:
            idx = 0
            texts_en, texts_ja = parse_html_table(path)
            for text_en, text_ja in zip(texts_en, texts_ja):
                row = {
                    "id": f"{path.stem}/{idx}",
                    "en": self._preprocess_en(text_en),
                    "ja": self._preprocess_ja(text_ja),
                    "source": path.name,
                }
                # Yield only pairs where both sides are non-empty strings.
                if (
                    isinstance(row["en"], str)
                    and isinstance(row["ja"], str)
                    and len(row["en"]) > 0
                    and len(row["ja"]) > 0
                ):
                    yield f"{path.name}/{idx}", row
                idx += 1
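

if __name__ == "__main__":
    # Minimal smoke test: a sketch that assumes this file is saved locally
    # as `en_ja_align.py` (hypothetical name) and a `datasets` version that
    # still supports script-based loading.
    dataset = ds.load_dataset("en_ja_align.py")
    print(dataset["train"][0])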