fix readme
Browse files- .gitattributes +33 -0
- README.md +68 -0
- data/filter_unified.min_entity_1_max_predicate_10.train.jsonl +3 -0
- data/filter_unified.min_entity_1_max_predicate_10.validation.jsonl +3 -0
- data/filter_unified.min_entity_1_max_predicate_100.train.jsonl +3 -0
- data/filter_unified.min_entity_1_max_predicate_100.validation.jsonl +3 -0
- data/filter_unified.min_entity_1_max_predicate_25.train.jsonl +3 -0
- data/filter_unified.min_entity_1_max_predicate_25.validation.jsonl +3 -0
- data/filter_unified.min_entity_1_max_predicate_50.train.jsonl +3 -0
- data/filter_unified.min_entity_1_max_predicate_50.validation.jsonl +3 -0
- data/filter_unified.min_entity_2_max_predicate_10.train.jsonl +3 -0
- data/filter_unified.min_entity_2_max_predicate_10.validation.jsonl +3 -0
- data/filter_unified.min_entity_2_max_predicate_100.train.jsonl +3 -0
- data/filter_unified.min_entity_2_max_predicate_100.validation.jsonl +3 -0
- data/filter_unified.min_entity_2_max_predicate_25.train.jsonl +3 -0
- data/filter_unified.min_entity_2_max_predicate_25.validation.jsonl +3 -0
- data/filter_unified.min_entity_2_max_predicate_50.train.jsonl +3 -0
- data/filter_unified.min_entity_2_max_predicate_50.validation.jsonl +3 -0
- data/filter_unified.min_entity_3_max_predicate_10.train.jsonl +3 -0
- data/filter_unified.min_entity_3_max_predicate_10.validation.jsonl +3 -0
- data/filter_unified.min_entity_3_max_predicate_100.train.jsonl +3 -0
- data/filter_unified.min_entity_3_max_predicate_100.validation.jsonl +3 -0
- data/filter_unified.min_entity_3_max_predicate_25.train.jsonl +3 -0
- data/filter_unified.min_entity_3_max_predicate_25.validation.jsonl +3 -0
- data/filter_unified.min_entity_3_max_predicate_50.train.jsonl +3 -0
- data/filter_unified.min_entity_3_max_predicate_50.validation.jsonl +3 -0
- data/filter_unified.min_entity_4_max_predicate_10.train.jsonl +3 -0
- data/filter_unified.min_entity_4_max_predicate_10.validation.jsonl +3 -0
- data/filter_unified.min_entity_4_max_predicate_100.train.jsonl +3 -0
- data/filter_unified.min_entity_4_max_predicate_100.validation.jsonl +3 -0
- data/filter_unified.min_entity_4_max_predicate_25.train.jsonl +3 -0
- data/filter_unified.min_entity_4_max_predicate_25.validation.jsonl +3 -0
- data/filter_unified.min_entity_4_max_predicate_50.train.jsonl +3 -0
- data/filter_unified.min_entity_4_max_predicate_50.validation.jsonl +3 -0
- data/filter_unified.test.jsonl +3 -0
- process.py +64 -0
- t_rex_relation_similarity.py +81 -0
.gitattributes
CHANGED
@@ -52,3 +52,36 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
52 |
*.jpg filter=lfs diff=lfs merge=lfs -text
|
53 |
*.jpeg filter=lfs diff=lfs merge=lfs -text
|
54 |
*.webp filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
52 |
*.jpg filter=lfs diff=lfs merge=lfs -text
|
53 |
*.jpeg filter=lfs diff=lfs merge=lfs -text
|
54 |
*.webp filter=lfs diff=lfs merge=lfs -text
|
55 |
+
data/filter_unified.min_entity_2_max_predicate_10.train.jsonl filter=lfs diff=lfs merge=lfs -text
|
56 |
+
data/filter_unified.min_entity_4_max_predicate_10.validation.jsonl filter=lfs diff=lfs merge=lfs -text
|
57 |
+
data/filter_unified.min_entity_4_max_predicate_100.train.jsonl filter=lfs diff=lfs merge=lfs -text
|
58 |
+
data/filter_unified.min_entity_4_max_predicate_50.train.jsonl filter=lfs diff=lfs merge=lfs -text
|
59 |
+
data/filter_unified.min_entity_4_max_predicate_100.validation.jsonl filter=lfs diff=lfs merge=lfs -text
|
60 |
+
data/filter_unified.test.jsonl filter=lfs diff=lfs merge=lfs -text
|
61 |
+
data/filter_unified.min_entity_1_max_predicate_10.validation.jsonl filter=lfs diff=lfs merge=lfs -text
|
62 |
+
data/filter_unified.min_entity_1_max_predicate_100.train.jsonl filter=lfs diff=lfs merge=lfs -text
|
63 |
+
data/filter_unified.min_entity_2_max_predicate_50.train.jsonl filter=lfs diff=lfs merge=lfs -text
|
64 |
+
data/filter_unified.min_entity_3_max_predicate_25.train.jsonl filter=lfs diff=lfs merge=lfs -text
|
65 |
+
data/filter_unified.min_entity_3_max_predicate_50.validation.jsonl filter=lfs diff=lfs merge=lfs -text
|
66 |
+
data/filter_unified.min_entity_4_max_predicate_10.train.jsonl filter=lfs diff=lfs merge=lfs -text
|
67 |
+
data/filter_unified.min_entity_1_max_predicate_50.train.jsonl filter=lfs diff=lfs merge=lfs -text
|
68 |
+
data/filter_unified.min_entity_1_max_predicate_50.validation.jsonl filter=lfs diff=lfs merge=lfs -text
|
69 |
+
data/filter_unified.min_entity_3_max_predicate_10.validation.jsonl filter=lfs diff=lfs merge=lfs -text
|
70 |
+
data/filter_unified.min_entity_1_max_predicate_25.train.jsonl filter=lfs diff=lfs merge=lfs -text
|
71 |
+
data/filter_unified.min_entity_2_max_predicate_10.validation.jsonl filter=lfs diff=lfs merge=lfs -text
|
72 |
+
data/filter_unified.min_entity_2_max_predicate_100.validation.jsonl filter=lfs diff=lfs merge=lfs -text
|
73 |
+
data/filter_unified.min_entity_2_max_predicate_25.train.jsonl filter=lfs diff=lfs merge=lfs -text
|
74 |
+
data/filter_unified.min_entity_2_max_predicate_50.validation.jsonl filter=lfs diff=lfs merge=lfs -text
|
75 |
+
data/filter_unified.min_entity_3_max_predicate_50.train.jsonl filter=lfs diff=lfs merge=lfs -text
|
76 |
+
data/filter_unified.min_entity_4_max_predicate_25.validation.jsonl filter=lfs diff=lfs merge=lfs -text
|
77 |
+
data/filter_unified.min_entity_1_max_predicate_25.validation.jsonl filter=lfs diff=lfs merge=lfs -text
|
78 |
+
data/filter_unified.min_entity_2_max_predicate_100.train.jsonl filter=lfs diff=lfs merge=lfs -text
|
79 |
+
data/filter_unified.min_entity_3_max_predicate_10.train.jsonl filter=lfs diff=lfs merge=lfs -text
|
80 |
+
data/filter_unified.min_entity_4_max_predicate_25.train.jsonl filter=lfs diff=lfs merge=lfs -text
|
81 |
+
data/filter_unified.min_entity_3_max_predicate_100.train.jsonl filter=lfs diff=lfs merge=lfs -text
|
82 |
+
data/filter_unified.min_entity_3_max_predicate_100.validation.jsonl filter=lfs diff=lfs merge=lfs -text
|
83 |
+
data/filter_unified.min_entity_3_max_predicate_25.validation.jsonl filter=lfs diff=lfs merge=lfs -text
|
84 |
+
data/filter_unified.min_entity_1_max_predicate_10.train.jsonl filter=lfs diff=lfs merge=lfs -text
|
85 |
+
data/filter_unified.min_entity_1_max_predicate_100.validation.jsonl filter=lfs diff=lfs merge=lfs -text
|
86 |
+
data/filter_unified.min_entity_2_max_predicate_25.validation.jsonl filter=lfs diff=lfs merge=lfs -text
|
87 |
+
data/filter_unified.min_entity_4_max_predicate_50.validation.jsonl filter=lfs diff=lfs merge=lfs -text
|
README.md
ADDED
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
language:
|
3 |
+
- en
|
4 |
+
license:
|
5 |
+
- other
|
6 |
+
multilinguality:
|
7 |
+
- monolingual
|
8 |
+
pretty_name: t_rex_relation_similarity
|
9 |
+
---
|
10 |
+
|
11 |
+
# Dataset Card for "relbert/t_rex_relation_similarity"
|
12 |
+
## Dataset Description
|
13 |
+
- **Repository:** [https://hadyelsahar.github.io/t-rex/](https://hadyelsahar.github.io/t-rex/)
|
14 |
+
- **Paper:** [https://aclanthology.org/L18-1544/](https://aclanthology.org/L18-1544/)
|
15 |
+
- **Dataset:** T-REX
|
16 |
+
|
17 |
+
## Dataset Summary
|
18 |
+
This is the clean version of [T-REX](https://aclanthology.org/L18-1544/) converted into relation similarity dataset format.
|
19 |
+
The original dataset is [`relbert/t_rex`](https://huggingface.co/datasets/relbert/t_rex).
|
20 |
+
|
21 |
+
- statistics of the train/validation split
|
22 |
+
|
23 |
+
| data | num of relation types (train) | average num of positive pairs (train) | average num of negative pairs (train) | num of relation types (validation) | average num of positive pairs (validation) | average num of negative pairs (validation) |
|
24 |
+
|:-------------------------------|--------------------------------:|----------------------------------------:|----------------------------------------:|-------------------------------------:|---------------------------------------------:|---------------------------------------------:|
|
25 |
+
| min_entity_1_max_predicate_100 | 208 | 34 | 7041 | 133 | 6 | 781 |
|
26 |
+
| min_entity_1_max_predicate_50 | 204 | 20 | 4111 | 113 | 4 | 455 |
|
27 |
+
| min_entity_1_max_predicate_25 | 202 | 12 | 2346 | 71 | 3 | 259 |
|
28 |
+
| min_entity_1_max_predicate_10 | 192 | 6 | 1128 | 25 | 2 | 125 |
|
29 |
+
| min_entity_2_max_predicate_100 | 188 | 26 | 4847 | 107 | 5 | 537 |
|
30 |
+
| min_entity_2_max_predicate_50 | 184 | 16 | 2986 | 85 | 3 | 331 |
|
31 |
+
| min_entity_2_max_predicate_25 | 181 | 9 | 1702 | 51 | 3 | 188 |
|
32 |
+
| min_entity_2_max_predicate_10 | 171 | 5 | 853 | 13 | 2 | 94 |
|
33 |
+
| min_entity_3_max_predicate_100 | 166 | 22 | 3637 | 82 | 5 | 402 |
|
34 |
+
| min_entity_3_max_predicate_50 | 157 | 15 | 2321 | 66 | 3 | 257 |
|
35 |
+
| min_entity_3_max_predicate_25 | 156 | 9 | 1381 | 37 | 3 | 152 |
|
36 |
+
| min_entity_3_max_predicate_10 | 148 | 4 | 684 | 17 | 2 | 75 |
|
37 |
+
| min_entity_4_max_predicate_100 | 150 | 20 | 2975 | 73 | 4 | 329 |
|
38 |
+
| min_entity_4_max_predicate_50 | 145 | 14 | 1975 | 56 | 3 | 219 |
|
39 |
+
| min_entity_4_max_predicate_25 | 141 | 9 | 1212 | 34 | 3 | 133 |
|
40 |
+
| min_entity_4_max_predicate_10 | 128 | 4 | 599 | 14 | 2 | 66 |
|
41 |
+
|
42 |
+
- statistics of the test split
|
43 |
+
|
44 |
+
| num of relation types (test) | average num of positive pairs (test) | average num of negative pairs (test) |
|
45 |
+
|-------------------------------:|---------------------------------------:|---------------------------------------:|
|
46 |
+
| 24 | 5 | 117 |
|
47 |
+
|
48 |
+
|
49 |
+
## Dataset Structure
|
50 |
+
### Data Instances
|
51 |
+
An example looks as follows.
|
52 |
+
```
|
53 |
+
{
|
54 |
+
"relation_type": "[Airline] has a hub in [Location]",
|
55 |
+
"positives": [["Korean Air", "Seoul"], ["Asiana Airlines", "Seoul"], ["Cathay Pacific", "Hong Kong"], ["Dragonair", "Hong Kong"], ["Qantas", "Singapore"], ["Air China", "Beijing"], ["Singapore Airlines", "Singapore"]],
|
56 |
+
"negatives": [["joint resolution", "United States Congress"], ["joint resolution", "Congress"], ["Great Seal", "United States"], ["trident", "Ukraine"], ["harp", "Ireland"], ["Plantagenet", "England"], ["Pahonia", "Lithuania"], ["slavery", "American Civil War"], ["main asteroid belt", "Solar System"], ["Colorado Desert", "Sonoran Desert"], ["DNA", "genome"], ["Mars", "Solar System"], ["Manchester United", "red"], ["Kermit", "greenness"], ["Ruby", "red"], ["Liberal Party", "red"], ["Macintosh", "Apple"], ["Apple II", "Apple"], ["Apple III", "Apple"], ["PlayStation 2", "Sony"], ["PlayStation 2", "Sony Computer Entertainment"], ["Beatles", "George Martin"], ["Baku", "Azerbaijan"], ["Accra", "Ghana"], ["Amman", "Jordan"], ["Hannover", "Lower Saxony"], ["Agartala", "Tripura"], ["Makassar", "South Sulawesi"], ["Taiwan", "China"], ["Poland", "United Nations"], ["Poland", "Europe"], ["Poland", "European Union"], ["Poland", "NATO"], ["German invasion", "22 June 1941"], ["Operation Barbarossa", "22 June 1941"], ["Brazil", "Catholic Church"], ["Turkey", "Islam"], ["Afghanistan", "Islam"], ["Iraq", "Islam"], ["Finland", "Evangelical Lutheran Church"], ["England", "Roman Catholic"], ["Congress", "United States"], ["Sejm", "Poland"], ["Diet", "Japan"], ["Majlis", "Iran"], ["Riksdag", "Sweden"], ["Croatian Parliament", "Croatia"], ["Knesset", "Israel"], ["Parliament", "Sri Lanka"], ["Russia", "Soviet Union"], ["Ukrainian SSR", "Soviet Union"], ["Royal Flying Corps", "Royal Air Force"], ["Canadian Army", "Canadian Forces"], ["Belarus", "Russian"], ["Russia", "Russian"], ["Ukraine", "Russian"], ["Kerala", "Malayalam"], ["American", "English"], ["zlib license", "Open Source Initiative"], ["EPL", "Open Source Initiative"], ["GNU General Public License", "Open Source Initiative"], ["Wrigley Field", "Cubs"], ["Wrigley Field", "Chicago Cubs"], ["Yankee Stadium", "Yankees"], ["Passaic River", "Newark Bay"], ["Rocky", "Sylvester Stallone"], ["The Godfather", "Francis Ford Coppola"], 
["Citizen Kane", "Orson Welles"], ["She Hate Me", "Spike Lee"], ["Raajneeti", "Prakash Jha"], ["Doctor Who", "Patrick Troughton"], ["Doctor Who", "Tom Baker"], ["Jana Gana Mana", "India"], ["President", "White House"], ["Washington", "Federalist Party"], ["George Washington", "Federalist Party"], ["Joseph Stalin", "Communist Party"], ["Mao Zedong", "Communist Party"], ["Lenin", "Communist Party"], ["Nelson Mandela", "ANC"], ["Putin", "Communist Party"], ["Nehru", "Indian National Congress"], ["Nicolas Sarkozy", "UMP"], ["Andreas Papandreou", "PASOK"], ["Tim Cook", "Apple"], ["Israel", "Isaac"], ["Meg", "Peter"], ["Elizabeth II", "Canada"], ["Victor Emmanuel III", "Italy"], ["Umberto I", "Italy"], ["Victor Emmanuel II", "Italy"], ["Brahms", "pianist"], ["Beethoven", "piano"], ["Nicky Hopkins", "pianist"], ["Mozart", "violin"], ["John Zorn", "saxophonist"], ["McCartney", "piano"], ["Russians", "Russian"], ["The Real McCoys", "CBS"], ["Brookside", "Channel 4"], ["The Real McCoys", "ABC"], ["Windows", "Microsoft"], ["Busan", "Gyeongbu Line"], ["Seoul", "Gyeongbu Line"], ["Springer Mountain", "Appalachian Trail"], ["Doctor Who", "BBC One"], ["central time zone", "Illinois"], ["CT", "Canada"], ["Central Time Zone", "Mexico"], ["Central Time Zone", "United States"], ["CT", "American"], ["CT", "Mexico"], ["CT", "United States"], ["central time zone", "Indiana"], ["Central Time Zone", "American"]]
|
57 |
+
}
|
58 |
+
```
|
59 |
+
|
60 |
+
## Citation Information
|
61 |
+
```
|
62 |
+
@inproceedings{elsahar2018t,
|
63 |
+
title={T-rex: A large scale alignment of natural language with knowledge base triples},
|
64 |
+
author={Elsahar, Hady and Vougiouklis, Pavlos and Remaci, Arslen and Gravier, Christophe and Hare, Jonathon and Laforest, Frederique and Simperl, Elena},
|
65 |
+
booktitle={Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018)},
|
66 |
+
year={2018}
|
67 |
+
}
|
68 |
+
```
|
data/filter_unified.min_entity_1_max_predicate_10.train.jsonl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:d69eda19134d3444648d603fd033f8080e777e6bb93a451860e362c1a3a4dc7b
|
3 |
+
size 6966611
|
data/filter_unified.min_entity_1_max_predicate_10.validation.jsonl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:570159dfac5ac040e602c1eb63d4cf17daf5efcdb27992b37abb05099e5751d1
|
3 |
+
size 107890
|
data/filter_unified.min_entity_1_max_predicate_100.train.jsonl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:540cd4c2a7d0d079a7c8ef6275543b646b83ace16d5828f9fe08107b2185f85e
|
3 |
+
size 49099973
|
data/filter_unified.min_entity_1_max_predicate_100.validation.jsonl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:cbd4daed0f874854451a131913240d37ce833bf480c89b16b58dadedd9a6db48
|
3 |
+
size 3476629
|
data/filter_unified.min_entity_1_max_predicate_25.train.jsonl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:2b96c169831191190e7f318dcb326a4ed83af043f12c7ff4b090effa1ce563f8
|
3 |
+
size 15443818
|
data/filter_unified.min_entity_1_max_predicate_25.validation.jsonl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:291b98c7d0012f1a287936363485c088b338475a607d5df28a4db40f4dc93bc9
|
3 |
+
size 618959
|
data/filter_unified.min_entity_1_max_predicate_50.train.jsonl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:9d31f3c671319f80ad3499514c265b25323641ac5b34199e8586351ce695cb04
|
3 |
+
size 27551730
|
data/filter_unified.min_entity_1_max_predicate_50.validation.jsonl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:155b118d6c11ed96088bf03ee193eedfb21bc133c3726af8df807fc4617ae8b2
|
3 |
+
size 1740209
|
data/filter_unified.min_entity_2_max_predicate_10.train.jsonl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:eb7a9989cfe1ce3e42a06687a729660005ef0b75ba324ed24050a01a6844d02e
|
3 |
+
size 4637780
|
data/filter_unified.min_entity_2_max_predicate_10.validation.jsonl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:77286f3ab1897f75301e399483f0260bcca0542f2331170a5d9d6da32023a0a6
|
3 |
+
size 39299
|
data/filter_unified.min_entity_2_max_predicate_100.train.jsonl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:0d4bd18df9c5e5d1ac028c10f712f74235d04f2a34e011adc23659bb142b8314
|
3 |
+
size 29596755
|
data/filter_unified.min_entity_2_max_predicate_100.validation.jsonl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:8f9e9791ae6b3738e4cc01b340a582082b3ed3ccc916c7c1353a1f2a5dad2c9e
|
3 |
+
size 1894548
|
data/filter_unified.min_entity_2_max_predicate_25.train.jsonl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:dd4fd074f40125fc6dac140e798fd121ca42037b970774d6b427c3c6ae56ef4e
|
3 |
+
size 9729765
|
data/filter_unified.min_entity_2_max_predicate_25.validation.jsonl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:dbec44cc2defe593574ee97ac937c10ae260f4f1cdbead38b10110ba41c46398
|
3 |
+
size 319030
|
data/filter_unified.min_entity_2_max_predicate_50.train.jsonl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:155d936dc59513f942c7a5aaac6699acfcd8d373b716105a6c685e60babe0834
|
3 |
+
size 17643311
|
data/filter_unified.min_entity_2_max_predicate_50.validation.jsonl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:d105d0927e63c9e88654a03c2efd9941ebdd328ddf2d446241ea22ea83f71575
|
3 |
+
size 923790
|
data/filter_unified.min_entity_3_max_predicate_10.train.jsonl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:b330c5f5b2a104f8b9d7ea39e95302ec70a5293c26dd32abf84c77af2ab3db83
|
3 |
+
size 3123088
|
data/filter_unified.min_entity_3_max_predicate_10.validation.jsonl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:d18d22ff5b68bcb52ffe13a2d4d995e1b2ec8cba2d8cf96730a1d2598bda34c4
|
3 |
+
size 40927
|
data/filter_unified.min_entity_3_max_predicate_100.train.jsonl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:27f201fdf42c813da9671fe8966dc5dec18a2b70cc0e21220fced5db0e8ba0f0
|
3 |
+
size 19103948
|
data/filter_unified.min_entity_3_max_predicate_100.validation.jsonl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:0f81062ebf81642a53c3c3a52761d619371c792f8614df5b86b24d9c417a7c1b
|
3 |
+
size 1042740
|
data/filter_unified.min_entity_3_max_predicate_25.train.jsonl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:b86738b4ae03058500939ee84ff92dce33dca91a0a6f46a99b906c0fd19bec78
|
3 |
+
size 6680533
|
data/filter_unified.min_entity_3_max_predicate_25.validation.jsonl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:d6402797ed016a17bd5d823ad0d3202f52f90440d115b5c369e954036cb67389
|
3 |
+
size 189365
|
data/filter_unified.min_entity_3_max_predicate_50.train.jsonl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:c1978ac7895610b69d5ccdfabee0f8be172aebc77357745b30808891e2b7aee6
|
3 |
+
size 11441982
|
data/filter_unified.min_entity_3_max_predicate_50.validation.jsonl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:d092e7fec5ba2f20c58140bc74ea58bed20fba3fe604986b91db7d5a3d623420
|
3 |
+
size 541636
|
data/filter_unified.min_entity_4_max_predicate_10.train.jsonl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:57ad5e9a410b2dc06ed7175b57c8f01b134d6b599ceadf7caa09fa87f1f09da7
|
3 |
+
size 2352885
|
data/filter_unified.min_entity_4_max_predicate_10.validation.jsonl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:68c7cff1fc7a716e285a52b0101bb6391bb41b153e23aacf8d2dbdc81c50959e
|
3 |
+
size 29053
|
data/filter_unified.min_entity_4_max_predicate_100.train.jsonl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:1fd7df22453c09dab2cc15c95ac6ab58694e0251eea135e6d0db2fcc994123ad
|
3 |
+
size 13942787
|
data/filter_unified.min_entity_4_max_predicate_100.validation.jsonl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:11663dafe1a985aac2a298ccd36012674d6fe7929def8a63388a68582b84e78f
|
3 |
+
size 766223
|
data/filter_unified.min_entity_4_max_predicate_25.train.jsonl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:152bae2b052920a0543a9ef6716e3c0d155c7a50c54bd30180cd4b4988d729d8
|
3 |
+
size 5254636
|
data/filter_unified.min_entity_4_max_predicate_25.validation.jsonl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:7aba74fea472a74de983a266aa271cff7c557aaa0b62685636c07723d55fb3b3
|
3 |
+
size 146023
|
data/filter_unified.min_entity_4_max_predicate_50.train.jsonl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:600b2dfdac64163841031105bccf0a5228cc1496d5d083cba9b4f2d4a8db0a96
|
3 |
+
size 8912834
|
data/filter_unified.min_entity_4_max_predicate_50.validation.jsonl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:7863eaf6f090884a5de471d5b38aa0dfff204b1a172f67dbd55be1c2a1ef5b42
|
3 |
+
size 391676
|
data/filter_unified.test.jsonl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:167a9d1309ae5a6507679ded0af46a1306c44dd6678800eb0e49b988d458c55b
|
3 |
+
size 88341
|
process.py
ADDED
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
import os
|
3 |
+
from itertools import product
|
4 |
+
from statistics import mean
|
5 |
+
|
6 |
+
import pandas as pd
|
7 |
+
from datasets import load_dataset
|
8 |
+
|
9 |
+
|
10 |
+
def process(name, split, output):
    """Convert one split of ``relbert/t_rex`` into relation-similarity JSONL.

    For every predicate with at least two triples, the triples of that
    predicate become the positive pairs and every other triple in the split
    becomes a negative pair.

    Args:
        name: configuration name of the ``relbert/t_rex`` dataset.
        split: split to load ('train', 'validation', or 'test').
        output: path of the JSONL file to write (one record per line).
    """
    df = load_dataset("relbert/t_rex", name, split=split).to_pandas()
    # only subject/object/predicate are needed for pair construction
    for column in ('text', 'title'):
        df.pop(column)
    df['pairs'] = [list(pair) for pair in zip(df.pop('subject'), df.pop('object'))]
    records = []
    for predicate, group in df.groupby("predicate"):
        # a relation type needs at least two pairs to be usable for similarity
        if len(group) < 2:
            continue
        records.append({
            "relation_type": predicate,
            "positives": group['pairs'].values.tolist(),
            "negatives": df[df.predicate != predicate]['pairs'].values.tolist(),
        })
    with open(output, "w") as f:
        f.write('\n'.join(json.dumps(record) for record in records))
|
23 |
+
|
24 |
+
|
25 |
+
# Grid of filtering parameters matching the relbert/t_rex configurations.
parameters_min_e_freq = [1, 2, 3, 4]
parameters_max_p_freq = [100, 50, 25, 10]
os.makedirs("data", exist_ok=True)

# Export every configuration to data/<config>.<split>.jsonl.
for min_e_freq, max_p_freq in product(parameters_min_e_freq, parameters_max_p_freq):
    config = f"filter_unified.min_entity_{min_e_freq}_max_predicate_{max_p_freq}"
    for split in ['train', 'validation']:
        process(
            name=config,
            split=split,
            output=f"data/{config}.{split}.jsonl")

    # NOTE(review): the test output has a fixed name, so each configuration
    # overwrites it; the file left on disk comes from the last configuration.
    # Presumably the test split is shared across configurations — verify.
    process(
        name=config,
        split='test',
        output="data/filter_unified.test.jsonl")


# Summarize the train/validation splits as a markdown table (used in README).
stats = []
for min_e_freq, max_p_freq in product(parameters_min_e_freq, parameters_max_p_freq):
    config = f"filter_unified.min_entity_{min_e_freq}_max_predicate_{max_p_freq}"
    row = {"data": config}
    for split in ['train', 'validation']:
        with open(f"data/{config}.{split}.jsonl") as f:
            records = [json.loads(line) for line in f.read().split('\n') if len(line) > 0]
        row[f'num of relation types ({split})'] = len(records)
        row[f'average num of positive pairs ({split})'] = round(mean(len(r['positives']) for r in records))
        row[f'average num of negative pairs ({split})'] = round(mean(len(r['negatives']) for r in records))
    stats.append(row)
df_stats = pd.DataFrame(stats)
df_stats.index = df_stats.pop('data')
print(df_stats.to_markdown())

# Summarize the (single) test split the same way.
with open("data/filter_unified.test.jsonl") as f:
    records = [json.loads(line) for line in f.read().split('\n') if len(line) > 0]
row = {
    'num of relation types (test)': len(records),
    'average num of positive pairs (test)': round(mean(len(r['positives']) for r in records)),
    'average num of negative pairs (test)': round(mean(len(r['negatives']) for r in records)),
}
df_stats_test = pd.DataFrame([row])
print(df_stats_test.to_markdown(index=False))
|
64 |
+
|
t_rex_relation_similarity.py
ADDED
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
from itertools import product

import datasets

logger = datasets.logging.get_logger(__name__)
_DESCRIPTION = """T-Rex dataset."""
_NAME = "t_rex_relation_similarity"
_VERSION = "0.0.0"
_CITATION = """
@inproceedings{elsahar2018t,
  title={T-rex: A large scale alignment of natural language with knowledge base triples},
  author={Elsahar, Hady and Vougiouklis, Pavlos and Remaci, Arslen and Gravier, Christophe and Hare, Jonathon and Laforest, Frederique and Simperl, Elena},
  booktitle={Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018)},
  year={2018}
}
"""

_HOME_PAGE = "https://github.com/asahi417/relbert"
# Base URL of the data files hosted in this dataset repository.
_URL = f'https://huggingface.co/datasets/relbert/{_NAME}/resolve/main/data'
MIN_ENTITY_FREQ = [1, 2, 3, 4]
MAX_PREDICATE_FREQ = [100, 50, 25, 10]

# One configuration per (min entity freq, max predicate freq) combination.
_TYPES = [f"filter_unified.min_entity_{a}_max_predicate_{b}" for a, b in product(MIN_ENTITY_FREQ, MAX_PREDICATE_FREQ)]
# BUG FIX: the files committed under data/ are named "<config>.<split>.jsonl"
# (e.g. "filter_unified.min_entity_1_max_predicate_10.train.jsonl") and
# "filter_unified.test.jsonl" — there is no "t_rex." prefix. The previous
# URLs ("{_URL}/t_rex.{i}.train.jsonl", "{_URL}/t_rex.filter_unified.test.jsonl")
# therefore pointed at files that do not exist and every download failed.
_URLS = {i: {
    str(datasets.Split.TRAIN): [f'{_URL}/{i}.train.jsonl'],
    str(datasets.Split.VALIDATION): [f'{_URL}/{i}.validation.jsonl'],
    str(datasets.Split.TEST): [f'{_URL}/filter_unified.test.jsonl']
} for i in _TYPES}
|
31 |
+
|
32 |
+
|
33 |
+
class TREXRelationSimilarityConfig(datasets.BuilderConfig):
    """Builder configuration for the t_rex_relation_similarity dataset."""

    def __init__(self, **kwargs):
        """Create a configuration.

        Args:
            **kwargs: keyword arguments forwarded to ``datasets.BuilderConfig``.
        """
        super().__init__(**kwargs)
|
42 |
+
|
43 |
+
|
44 |
+
class TREXRelationSimilarity(datasets.GeneratorBasedBuilder):
    """Builder producing relation-similarity examples from T-REX JSONL files."""

    # One builder config per filtering setting declared in _TYPES.
    BUILDER_CONFIGS = [
        TREXRelationSimilarityConfig(name=config_name, version=datasets.Version(_VERSION), description=_DESCRIPTION)
        for config_name in sorted(_TYPES)
    ]

    def _split_generators(self, dl_manager):
        # Download the file list registered for the selected configuration;
        # the result maps each split name to its local file paths.
        downloaded = dl_manager.download_and_extract(_URLS[self.config.name])
        splits = [datasets.Split.TRAIN, datasets.Split.VALIDATION, datasets.Split.TEST]
        return [
            datasets.SplitGenerator(name=split, gen_kwargs={"filepaths": downloaded[str(split)]})
            for split in splits
        ]

    def _generate_examples(self, filepaths):
        # Keys are sequential across all files of the split.
        key = 0
        for path in filepaths:
            logger.info(f"generating examples from = {path}")
            with open(path, encoding="utf-8") as f:
                lines = [line for line in f.read().split('\n') if len(line) > 0]
            for line in lines:
                yield key, json.loads(line)
                key += 1

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "relation_type": datasets.Value("string"),
                    # positives/negatives are lists of [head, tail] string pairs
                    "positives": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
                    "negatives": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
                }
            ),
            supervised_keys=None,
            homepage=_HOME_PAGE,
            citation=_CITATION,
        )
|