Gaëtan Caillaut committed
Commit · 46a70d6
1 Parent(s): 9362a18
update to frwiki-20220901 and to v0.2

Files changed:
- .gitattributes +1 -5
- data/{frwiki-20220601 → frwiki-20220901}/corpus.jsonl.gz +2 -2
- data/{frwiki-20220601 → frwiki-20220901}/corpus_abstracts.jsonl.gz +2 -2
- data/{frwiki-20220601 → frwiki-20220901}/corpus_mini.jsonl.gz +2 -2
- data/{frwiki-20220601 → frwiki-20220901}/entities.jsonl.gz +2 -2
- frwiki_el.py +8 -83
.gitattributes CHANGED
@@ -35,8 +35,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.mp3 filter=lfs diff=lfs merge=lfs -text
 *.ogg filter=lfs diff=lfs merge=lfs -text
 *.wav filter=lfs diff=lfs merge=lfs -text
-data/
-data/entities.jsonl.gz filter=lfs diff=lfs merge=lfs -text
-data/corpus_mini.jsonl.gz filter=lfs diff=lfs merge=lfs -text
-data/corpus_abstracts.jsonl.gz filter=lfs diff=lfs merge=lfs -text
-data/frwiki-20220601/*.jsonl.gz filter=lfs diff=lfs merge=lfs -text
+data/frwiki-20220901/*.jsonl.gz filter=lfs diff=lfs merge=lfs -text
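These attribute rules make Git LFS store the matching corpus files as small pointer files in the repository; the renamed *.jsonl.gz entries below are exactly such pointers. As a purely illustrative sketch (the parse_lfs_pointer helper is hypothetical, not part of this repo), a pointer can be split into its fields like this:

def parse_lfs_pointer(text: str) -> dict:
    """Split a Git LFS pointer file (version / oid / size lines) into a dict."""
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

# Pointer content taken from the renamed corpus.jsonl.gz below.
pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:960f5dcf94cb73061c21de8f2f4b3979c388f3924419764a40da5db4d757855c
size 3193932378"""

info = parse_lfs_pointer(pointer)
print(info["oid"])        # sha256:960f5dcf...
print(int(info["size"]))  # 3193932378 bytes, roughly 3.2 GB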
data/{frwiki-20220601 → frwiki-20220901}/corpus.jsonl.gz RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:960f5dcf94cb73061c21de8f2f4b3979c388f3924419764a40da5db4d757855c
+size 3193932378
data/{frwiki-20220601 → frwiki-20220901}/corpus_abstracts.jsonl.gz RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:9b1954986631da26d31c9a7b8a4ad30e108981f7201728c57555a052ac77f210
+size 465701861
data/{frwiki-20220601 → frwiki-20220901}/corpus_mini.jsonl.gz RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:ec52d912d96a771cb6374b2358024b47df7f3bde3b2ae03743fda567370ae827
+size 132297
data/{frwiki-20220601 → frwiki-20220901}/entities.jsonl.gz RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:3711542c9bee2dfbf5c93eb8bd71efda996b6161e808f0debe1a4df0af643585
+size 185405217
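Each of these files is a gzip-compressed JSON Lines corpus, one document per line. A small sketch of reading a few records directly, using the same gzip-over-a-binary-file-handle pattern that frwiki_el.py uses below (the field names printed here are assumptions based on the script's feature schema):

import gzip
import json

# corpus_mini.jsonl.gz is the smallest file in this commit (~130 KB), handy for a quick look.
path = "data/frwiki-20220901/corpus_mini.jsonl.gz"

with gzip.open(open(path, "rb"), "rt", encoding="UTF-8") as datafile:
    for i, line in enumerate(datafile):
        item = json.loads(line)
        print(item.get("name"), item.get("wikidata_id"), item.get("wikipedia_url"))
        if i >= 2:  # peek at the first three documents only
            break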
frwiki_el.py CHANGED
@@ -35,82 +35,17 @@ _HOMEPAGE = "https://github.com/GaaH/frwiki_el"
 _LICENSE = "WTFPL"
 
 _URLs = {
-    "frwiki": "data/frwiki-
-    "frwiki-mini": "data/frwiki-
-    "frwiki-abstracts": "data/frwiki-
-    "entities": "data/frwiki-
+    "frwiki": "data/frwiki-20220901/corpus.jsonl.gz",
+    "frwiki-mini": "data/frwiki-20220901/corpus_mini.jsonl.gz",
+    "frwiki-abstracts": "data/frwiki-20220901/corpus_abstracts.jsonl.gz",
+    "entities": "data/frwiki-20220901/entities.jsonl.gz",
 }
 
-_NER_CLASS_LABELS = [
-    "B",
-    "I",
-    "O",
-]
-
-_ENTITY_TYPES = [
-    "DATE",
-    "PERSON",
-    "GEOLOC",
-    "ORG",
-    "OTHER",
-]
-
-
-def item_to_el_features(item, title2qid):
-    res = {
-        "title": item['name'].replace("_", " "),
-        "wikidata_id": item['wikidata_id'],
-        "wikipedia_id": item['wikipedia_id'],
-        "wikidata_url": item['wikidata_url'],
-        "wikipedia_url": item['wikipedia_url'],
-    }
-    text_dict = {
-        "words": [],
-        "ner": [],
-        "el": [],
-    }
-    entity_pattern = r"\[E=(.+?)\](.+?)\[/E\]"
-
-    # start index of the previous text
-    i = 0
-    text = item['text']
-    for m in re.finditer(entity_pattern, text):
-        mention_title = m.group(1)
-        mention = m.group(2)
-
-        mention_qid = title2qid.get(mention_title.replace("_", " "), "unknown")
-
-        mention_words = mention.split()
-
-        j = m.start(0)
-        prev_text = text[i:j].split()
-        len_prev_text = len(prev_text)
-        text_dict["words"].extend(prev_text)
-        text_dict["ner"].extend(["O"] * len_prev_text)
-        text_dict["el"].extend([None] * len_prev_text)
-
-        text_dict["words"].extend(mention_words)
-
-        len_mention_tail = len(mention_words) - 1
-        text_dict["ner"].extend(["B"] + ["I"] * len_mention_tail)
-        text_dict["el"].extend([mention_qid] + [mention_qid] * len_mention_tail)
-
-        i = m.end(0)
-
-    tail = text[i:].split()
-    len_tail = len(tail)
-    text_dict["words"].extend(tail)
-    text_dict["ner"].extend(["O"] * len_tail)
-    text_dict["el"].extend([None] * len_tail)
-    res.update(text_dict)
-    return res
-
-
 class FrwikiElDataset(datasets.GeneratorBasedBuilder):
     """
     """
 
-    VERSION = datasets.Version("0.
+    VERSION = datasets.Version("0.2.0")
 
     # This is an example of a dataset with multiple configurations.
     # If you don't want/need to define several sub-sets in your dataset,
@@ -139,16 +74,6 @@ class FrwikiElDataset(datasets.GeneratorBasedBuilder):
 
     def _info(self):
         if self.config.name in ("frwiki", 'frwiki-mini', 'frwiki-abstracts'):
-            # features = datasets.Features({
-            #     "name": datasets.Value("string"),
-            #     "wikidata_id": datasets.Value("string"),
-            #     "wikipedia_id": datasets.Value("string"),
-            #     "wikipedia_url": datasets.Value("string"),
-            #     "wikidata_url": datasets.Value("string"),
-            #     "words": [datasets.Value("string")],
-            #     "ner": [datasets.ClassLabel(names=_NER_CLASS_LABELS)],
-            #     "el": [datasets.Value("string")],
-            # })
             features = datasets.Features({
                 "name": datasets.Value("string"),
                 "wikidata_id": datasets.Value("string"),
@@ -156,8 +81,8 @@ class FrwikiElDataset(datasets.GeneratorBasedBuilder):
                 "wikipedia_url": datasets.Value("string"),
                 "wikidata_url": datasets.Value("string"),
                 "sentences": [{
-                    "
-                    "ner": [datasets.
+                    "text": datasets.Value("string"),
+                    "ner": [datasets.Value("string")],
                     "mention_mappings": [[datasets.Value("int16")]],
                     "el_wikidata_id": [datasets.Value("string")],
                     "el_wikipedia_id": [datasets.Value("string")],
@@ -221,5 +146,5 @@ class FrwikiElDataset(datasets.GeneratorBasedBuilder):
         # https://github.com/huggingface/datasets/issues/2607#issuecomment-883219727
         with gzip.open(open(path, 'rb'), "rt", encoding="UTF-8") as datafile:
             for id, line in enumerate(datafile):
-                item = json.loads(line
+                item = json.loads(line)
                 yield id, item
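For context, a hypothetical usage sketch of the updated script: the config names and the v0.2 sentence-level schema come from the diff above, while the Hub repo id ("gcaillaut/frwiki_el") and the split handling are assumptions.

from datasets import load_dataset

# "frwiki-mini" is the smallest configuration declared in _URLs above.
ds = load_dataset("gcaillaut/frwiki_el", "frwiki-mini")  # repo id is an assumption
print(ds)  # shows the splits and the v0.2 features

split = next(iter(ds.values()))  # whichever split the script defines
doc = split[0]
print(doc["name"], doc["wikidata_id"])
# v0.2 schema: documents carry a list of sentences with raw text, string NER
# tags and mention mappings, instead of the old flat words/ner/el lists.
print(doc["sentences"][0]["text"])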