update
Files changed:
- README.md +3 -0
- data/amazon_reviews_multi.jsonl +2 -2
- data/mike0307.jsonl +3 -0
- data/nbnn.jsonl +2 -2
- data/nordic_langid.jsonl +2 -2
- data/stsb_multi_mt.jsonl +2 -2
- data/xnli.jsonl +2 -2
- examples/preprocess/preprocess_amazon_reviews_multi.py +20 -2
- examples/preprocess/preprocess_mike0307.py +99 -0
- examples/preprocess/preprocess_nbnn.py +16 -7
- examples/preprocess/preprocess_nordic_langid.py +17 -3
- examples/preprocess/preprocess_scandi_langid.py +16 -2
- examples/preprocess/preprocess_stsb_multi_mt.py +21 -5
- examples/preprocess/preprocess_xnli.py +20 -3
- language_identification.py +18 -5
- main.py +7 -1
README.md
CHANGED
@@ -18,6 +18,9 @@ license: apache-2.0
 | nbnn | [oai-nb-no-sbr-80](https://www.nb.no/sprakbanken/ressurskatalog/oai-nb-no-sbr-80/) | TRAIN: 1556212, VALID: 1957, TEST: 1944 | This corpus contains news text from the Norwegian News Agency (NTB) translated from Bokmål into Nynorsk. | [NbAiLab/nbnn_language_detection](https://huggingface.co/datasets/NbAiLab/nbnn_language_detection) |
 | scandi_langid | | TRAIN: 239618, TEST: 59840 | | [kardosdrur/scandi-langid](https://huggingface.co/datasets/kardosdrur/scandi-langid) |
 | nordic_langid | [Discriminating Between Similar Nordic Languages](https://aclanthology.org/2021.vardial-1.8/) | TRAIN: 226159, TEST: 10700 | Focuses on distinguishing six Nordic languages: Danish, Swedish, Norwegian (Nynorsk), Norwegian (Bokmål), Faroese, and Icelandic. | [strombergnlp/nordic_langid](https://huggingface.co/datasets/strombergnlp/nordic_langid) |
+| mike0307 | [Mike0307/language-detection](https://huggingface.co/datasets/Mike0307/language-detection) | TRAIN: 33095, VALID: 4040, TEST: 4048 | | |
+| tatoeba | [tatoeba](https://tatoeba.org/); [Tatoeba Paper](https://arxiv.org/abs/1812.10464v2) | | Tatoeba is a collection of sentences and their translations. | [tatoeba](https://huggingface.co/datasets/tatoeba) |
+| bucc2018 | [bucc2018](https://comparable.limsi.fr/bucc2018/bucc2018-task.html) | | Shared task: identifying parallel sentences in comparable corpora. | |
data/amazon_reviews_multi.jsonl
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:3c9aca66f5c78a0aaaaf937d4aa4c61e88f4d9125c757d4668c50f66e525ea64
+size 352729340
data/mike0307.jsonl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:27fdb1197a2b6ada52ce9509be86701c627ac6f61deba60c6d85b4063aae2369
+size 13235714
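The data/*.jsonl entries in this commit are Git LFS pointer files, so the diff records only a SHA-256 oid and a byte size rather than the data itself. Below is a minimal sketch, not part of the repository, for checking a locally pulled file against the pointer shown above; the helper name and hard-coded path are illustrative only.

# Hypothetical check: compare a pulled LFS file against the oid/size
# recorded in its pointer.
import hashlib
import os

def matches_lfs_pointer(path, expected_oid, expected_size):
    # Cheap size comparison first, then stream the file through SHA-256.
    if os.path.getsize(path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

print(matches_lfs_pointer(
    "data/mike0307.jsonl",
    "27fdb1197a2b6ada52ce9509be86701c627ac6f61deba60c6d85b4063aae2369",
    13235714,
))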
data/nbnn.jsonl
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:8b6398b7e9adc966e646fb2135d5a654936923884f76d749bc5e0b81f2c69cc6
+size 274464082
data/nordic_langid.jsonl
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:71d22ab36dd2bec0b5fa78063407531298eb310f70bc9f20e0a00e5329918850
+size 48047359
data/stsb_multi_mt.jsonl
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:4a581d6ebc8ab00e13d81fa1b5b7b5aaa7201dc47653698bddd62953176c1d9c
+size 24033913
data/xnli.jsonl
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:e741e58100d8f3cb5aa90da2c91097419d49d61850e153682f5ad8d97b556981
+size 1410472195
examples/preprocess/preprocess_amazon_reviews_multi.py
CHANGED
@@ -45,14 +45,32 @@ def main():
     )
     print(dataset_dict)
 
+    split_map = {
+        "dev": "validation",
+        "validate": "validation"
+    }
+
+    language_map = {
+        "zh": "zh-cn"
+    }
+
     text_set = set()
     counter = defaultdict(int)
     with open(args.output_file, "w", encoding="utf-8") as f:
         for k, v in dataset_dict.items():
+            if k in split_map.keys():
+                split = split_map[k]
+            else:
+                split = k
+            if split not in ("train", "validation", "test"):
+                print("skip split: {}".format(split))
+                continue
+
             for sample in tqdm(v):
 
                 text = sample["review_body"]
                 language = sample["language"]
+                language = language_map.get(language, language)
 
                 text = text.strip()
 
@@ -67,11 +85,11 @@ def main():
                     "text": text,
                     "language": language,
                     "data_source": "amazon_reviews_multi",
-                    "split":
+                    "split": split
                 }
                 row = json.dumps(row, ensure_ascii=False)
                 f.write("{}\n".format(row))
-                counter[
+                counter[split] += 1
 
     print("counter: {}".format(counter))
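The edit above introduces a pattern that recurs in every preprocess script touched by this commit: raw split names are normalized onto train/validation/test (anything else is skipped) and ambiguous language codes are remapped, e.g. zh to zh-cn. A standalone sketch of that pattern follows; the helper names are hypothetical and not part of the repo.

# Illustrative restatement of the split/language normalization added in
# this commit; normalize_split and normalize_language are made-up helpers.
SPLIT_MAP = {"dev": "validation", "validate": "validation"}
LANGUAGE_FIXES = {"zh": "zh-cn"}

def normalize_split(raw_split):
    # Map dataset-specific split names onto the canonical three splits;
    # return None so callers can skip anything else.
    split = SPLIT_MAP.get(raw_split, raw_split)
    return split if split in ("train", "validation", "test") else None

def normalize_language(code):
    # Remap ambiguous language codes, e.g. bare "zh" to "zh-cn".
    return LANGUAGE_FIXES.get(code, code)

assert normalize_split("dev") == "validation"
assert normalize_split("unsupervised") is None
assert normalize_language("zh") == "zh-cn"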
examples/preprocess/preprocess_mike0307.py
ADDED
@@ -0,0 +1,99 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+import argparse
+from collections import defaultdict
+import json
+import os
+import sys
+
+pwd = os.path.abspath(os.path.dirname(__file__))
+sys.path.append(os.path.join(pwd, "../../"))
+
+from datasets import load_dataset, DownloadMode
+from tqdm import tqdm
+
+from language_identification import LANGUAGE_MAP
+from project_settings import project_path
+
+
+def get_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--dataset_path", default="Mike0307/language-detection", type=str)
+    parser.add_argument(
+        "--dataset_cache_dir",
+        default=(project_path / "hub_datasets").as_posix(),
+        type=str
+    )
+    parser.add_argument(
+        "--output_file",
+        default=(project_path / "data/mike0307.jsonl"),
+        type=str
+    )
+
+    args = parser.parse_args()
+    return args
+
+
+def main():
+    args = get_args()
+
+    dataset_dict = load_dataset(
+        path=args.dataset_path,
+        cache_dir=args.dataset_cache_dir,
+        # download_mode=DownloadMode.FORCE_REDOWNLOAD
+    )
+    print(dataset_dict)
+
+    split_map = {
+        "dev": "validation",
+        "validate": "validation"
+    }
+
+    language_map = {
+        "zh-TW": "zh-tw"
+    }
+
+    text_set = set()
+    counter = defaultdict(int)
+    with open(args.output_file, "w", encoding="utf-8") as f:
+        for k, v in dataset_dict.items():
+            if k in split_map.keys():
+                split = split_map[k]
+            else:
+                split = k
+            if split not in ("train", "validation", "test"):
+                print("skip split: {}".format(split))
+                continue
+
+            for sample in tqdm(v):
+
+                text = sample["text"]
+                language = sample["language_code"]
+                language = language_map.get(language, language)
+
+                text = text.strip()
+
+                if text in text_set:
+                    continue
+                text_set.add(text)
+
+                if language not in LANGUAGE_MAP.keys():
+                    raise AssertionError(language)
+
+                row = {
+                    "text": text,
+                    "language": language,
+                    "data_source": "mike0307",
+                    "split": split
+                }
+                row = json.dumps(row, ensure_ascii=False)
+                f.write("{}\n".format(row))
+                counter[split] += 1
+
+    print("counter: {}".format(counter))
+
+    return
+
+
+if __name__ == '__main__':
+    main()
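Each preprocess script, including the new one above, writes one JSON object per line with the keys text, language, data_source and split. A minimal sketch for tallying the resulting file, assuming the script has already been run with its default output path:

# Read back data/mike0307.jsonl and count rows per (split, language) pair.
import json
from collections import Counter

counts = Counter()
with open("data/mike0307.jsonl", "r", encoding="utf-8") as f:
    for line in f:
        row = json.loads(line)
        counts[(row["split"], row["language"])] += 1

for (split, language), count in sorted(counts.items()):
    print(split, language, count)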
examples/preprocess/preprocess_nbnn.py
CHANGED
@@ -45,18 +45,27 @@ def main():
     print(dataset_dict)
 
     language_map = {
-        "nno": "
-        "nob": "
+        "nno": "no-n",
+        "nob": "no-b"
+    }
+
+    split_map = {
+        "dev": "validation",
+        "validate": "validation"
     }
 
     text_set = set()
     counter = defaultdict(int)
     with open(args.output_file, "w", encoding="utf-8") as f:
         for k, v in dataset_dict.items():
-
+
+            if k in split_map.keys():
+                split = split_map[k]
+            else:
+                split = k
+            if split not in ("train", "validation", "test"):
+                print("skip split: {}".format(split))
                 continue
-            if k == "dev":
-                k = "validation"
 
             for sample in tqdm(v):
 
@@ -77,11 +86,11 @@ def main():
                     "text": text,
                     "language": language,
                     "data_source": "nbnn",
-                    "split":
+                    "split": split
                 }
                 row = json.dumps(row, ensure_ascii=False)
                 f.write("{}\n".format(row))
-                counter[
+                counter[split] += 1
 
     print("counter: {}".format(counter))
examples/preprocess/preprocess_nordic_langid.py
CHANGED
@@ -45,12 +45,26 @@ def main():
     )
     print(dataset_dict)
 
-
+    split_map = {
+        "dev": "validation",
+        "validate": "validation"
+    }
+
+    index_to_language = ["no-b", "is", "no-n", "sv", "fo", "da"]
 
     text_set = set()
     counter = defaultdict(int)
    with open(args.output_file, "w", encoding="utf-8") as f:
         for k, v in dataset_dict.items():
+
+            if k in split_map.keys():
+                split = split_map[k]
+            else:
+                split = k
+            if split not in ("train", "validation", "test"):
+                print("skip split: {}".format(split))
+                continue
+
             for sample in tqdm(v):
 
                 text = sample["sentence"]
@@ -70,11 +84,11 @@ def main():
                     "text": text,
                     "language": language,
                     "data_source": "nordic_langid",
-                    "split":
+                    "split": split
                 }
                 row = json.dumps(row, ensure_ascii=False)
                 f.write("{}\n".format(row))
-                counter[
+                counter[split] += 1
 
     print("counter: {}".format(counter))
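The added index_to_language list suggests the nordic_langid samples carry their label as an integer index; the part of the loop outside this hunk presumably converts it along these lines. This is an assumption, not code from the repository.

# Assumed conversion, not shown in the diff above: turn an integer label
# into one of the codes from index_to_language.
index_to_language = ["no-b", "is", "no-n", "sv", "fo", "da"]

def label_to_code(label_index):
    return index_to_language[label_index]

print(label_to_code(1))  # -> "is"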
examples/preprocess/preprocess_scandi_langid.py
CHANGED
@@ -44,10 +44,24 @@ def main():
     )
     print(dataset_dict)
 
+    split_map = {
+        "dev": "validation",
+        "validate": "validation"
+    }
+
     text_set = set()
     counter = defaultdict(int)
     with open(args.output_file, "w", encoding="utf-8") as f:
         for k, v in dataset_dict.items():
+
+            if k in split_map.keys():
+                split = split_map[k]
+            else:
+                split = k
+            if split not in ("train", "validation", "test"):
+                print("skip split: {}".format(split))
+                continue
+
             for sample in tqdm(v):
 
                 text = sample["text"]
@@ -66,11 +80,11 @@ def main():
                     "text": text,
                     "language": language,
                     "data_source": "scandi_langid",
-                    "split":
+                    "split": split
                 }
                 row = json.dumps(row, ensure_ascii=False)
                 f.write("{}\n".format(row))
-                counter[
+                counter[split] += 1
 
     print("counter: {}".format(counter))
examples/preprocess/preprocess_stsb_multi_mt.py
CHANGED
@@ -50,15 +50,31 @@ def main():
         print(dataset_dict)
         dataset_dict_list.append((name, dataset_dict))
 
+    split_map = {
+        "dev": "validation",
+        "validate": "validation"
+    }
+
+    language_map = {
+        "zh": "zh-cn"
+    }
+
     text_set = set()
     counter = defaultdict(int)
     with open(args.output_file, "w", encoding="utf-8") as f:
         for language, dataset_dict in dataset_dict_list:
+            language = language_map.get(language, language)
+
             for k, v in dataset_dict.items():
-
-
-
+                if k in split_map.keys():
+                    split = split_map[k]
+                else:
+                    split = k
+                if split not in ("train", "validation", "test"):
+                    print("skip split: {}".format(split))
+                    continue
 
+                for sample in tqdm(v):
                     sentence1 = sample["sentence1"]
                     sentence2 = sample["sentence2"]
 
@@ -76,11 +92,11 @@ def main():
                         "text": text,
                         "language": language,
                         "data_source": "stsb_multi_mt",
-                        "split":
+                        "split": split
                     }
                     row = json.dumps(row, ensure_ascii=False)
                     f.write("{}\n".format(row))
-                    counter[
+                    counter[split] += 1
 
     print("counter: {}".format(counter))
examples/preprocess/preprocess_xnli.py
CHANGED
@@ -46,10 +46,27 @@ def main():
     )
     print(dataset_dict)
 
+    split_map = {
+        "dev": "validation",
+        "validate": "validation"
+    }
+
+    language_map = {
+        "zh": "zh-cn"
+    }
+
     text_set = set()
     counter = defaultdict(int)
     with open(args.output_file, "w", encoding="utf-8") as f:
         for k, v in dataset_dict.items():
+            if k in split_map.keys():
+                split = split_map[k]
+            else:
+                split = k
+            if split not in ("train", "validation", "test"):
+                print("skip split: {}".format(split))
+                continue
+
             for sample in tqdm(v):
 
                 hypothesis = sample["hypothesis"]
@@ -63,8 +80,8 @@ def main():
                 language_list = hypothesis["language"] + premise_language_list
                 translation_list = hypothesis["translation"] + premise_text_list
                 for language, translation in zip(language_list, translation_list):
-
                     text = translation.strip()
+                    language = language_map.get(language, language)
 
                     if text in text_set:
                         continue
@@ -77,11 +94,11 @@ def main():
                         "text": text,
                         "language": language,
                         "data_source": "xnli",
-                        "split":
+                        "split": split
                     }
                     row = json.dumps(row, ensure_ascii=False)
                     f.write("{}\n".format(row))
-                    counter[
+                    counter[split] += 1
 
     print("counter: {}".format(counter))
language_identification.py
CHANGED
@@ -10,6 +10,13 @@ import datasets
 
 _URLS = {
     "amazon_reviews_multi": "data/amazon_reviews_multi.jsonl",
+    "mike0307": "data/mike0307.jsonl",
+    "nbnn": "data/nbnn.jsonl",
+    "nordic_langid": "data/nordic_langid.jsonl",
+    "scandi_langid": "data/scandi_langid.jsonl",
+    "stsb_multi_mt": "data/stsb_multi_mt.jsonl",
+    "xnli": "data/xnli.jsonl",
+
 }
 
 
@@ -40,11 +47,9 @@ LANGUAGE_MAP = {
     "it": "italian",
     "ja": "japanese",
     "nl": "dutch",
-    # "nno": "norwegian (nynorsk)",
-    "nn": "norwegian (nynorsk)",
     "no": "norwegian",
-
-    "
+    "no-b": "norwegian (bokmål)",
+    "no-n": "norwegian (nynorsk)",
     "pl": "polish",
     "pt": "portuguese",
     "ru": "russian",
@@ -54,7 +59,8 @@ LANGUAGE_MAP = {
     "tr": "turkish",
     "ur": "urdu",
     "vi": "vietnamese",
-    "zh": "chinese",
+    "zh-cn": "simplified chinese",
+    "zh-tw": "traditional chinese",
 }
 
 
@@ -63,6 +69,13 @@ class LanguageIdentification(datasets.GeneratorBasedBuilder):
 
     BUILDER_CONFIGS = [
         datasets.BuilderConfig(name="amazon_reviews_multi", version=VERSION, description="amazon_reviews_multi"),
+        datasets.BuilderConfig(name="mike0307", version=VERSION, description="mike0307"),
+        datasets.BuilderConfig(name="nbnn", version=VERSION, description="nbnn"),
+        datasets.BuilderConfig(name="nordic_langid", version=VERSION, description="nordic_langid"),
+        datasets.BuilderConfig(name="scandi_langid", version=VERSION, description="scandi_langid"),
+        datasets.BuilderConfig(name="stsb_multi_mt", version=VERSION, description="stsb_multi_mt"),
+        datasets.BuilderConfig(name="xnli", version=VERSION, description="xnli"),
+
     ]
 
     def _info(self):
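With these edits, LANGUAGE_MAP now distinguishes Bokmål from Nynorsk and simplified from traditional Chinese, and the preprocess scripts import it and raise an AssertionError for any code it does not contain. A small sketch of looking up the new codes, assuming it is run from the repository root as the preprocess scripts do:

# Illustrative lookup of the newly added codes; LANGUAGE_MAP comes from
# language_identification.py in this repository.
from language_identification import LANGUAGE_MAP

for code in ("no-b", "no-n", "zh-cn", "zh-tw"):
    print(code, "->", LANGUAGE_MAP[code])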
main.py
CHANGED
@@ -6,7 +6,13 @@ from tqdm import tqdm
 
 dataset = load_dataset(
     "language_identification.py",
-    name="amazon_reviews_multi",
+    # name="amazon_reviews_multi",
+    # name="mike0307",
+    name="nbnn",
+    # name="nordic_langid",
+    # name="scandi_langid",
+    # name="stsb_multi_mt",
+    # name="xnli",
     split="train",
     # streaming=True,
     cache_dir=None,