update stats

Files changed:
- README.md +6 -6
- stats.ipynb +88 -21
README.md
CHANGED
@@ -11,18 +11,18 @@ Details about the inspec dataset can be found in the original paper [(Hulth, 2003)
 Reference (indexer-assigned) keyphrases are also categorized under the PRMU (<u>P</u>resent-<u>R</u>eordered-<u>M</u>ixed-<u>U</u>nseen) scheme as proposed in [(Boudin and Gallina, 2021)][boudin-2021].
 
 Text pre-processing (tokenization) is carried out using `spacy` (`en_core_web_sm` model) with a special rule to avoid splitting words with hyphens (e.g. graph-based is kept as one token).
-Stemming (Porter's stemmer implementation provided in `nltk`) is …
+Stemming (Porter's stemmer implementation provided in `nltk`) is applied before reference keyphrases are matched against the source text.
 Details about the process can be found in `prmu.py`.
 
 ## Content and statistics
 
 The dataset is divided into the following three splits:
 
-| Split      | # documents | # keyphrases | % Present | % Reordered | % Mixed | % Unseen |
-| :--------- | ----------: | -----------: | --------: | ----------: | ------: | -------: |
-| Train      | 1,000       | 9.79         | 78.00     | 9.85        | 6.22    | 5.93     |
-| Validation | 500         | 9.15         | 77.96     | 9.82        | 6.75    | 5.47     |
-| Test       | 500         | 9.83         | 78.70     | 9.92        | 6.48    | 4.91     |
+| Split      | # documents | # words | # keyphrases | % Present | % Reordered | % Mixed | % Unseen |
+| :--------- | ----------: | ------: | -----------: | --------: | ----------: | ------: | -------: |
+| Train      | 1,000       | 141.7   | 9.79         | 78.00     | 9.85        | 6.22    | 5.93     |
+| Validation | 500         | 132.2   | 9.15         | 77.96     | 9.82        | 6.75    | 5.47     |
+| Test       | 500         | 134.8   | 9.83         | 78.70     | 9.92        | 6.48    | 4.91     |
 
 The following data fields are available:
 
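The README defers the matching details to `prmu.py`. For orientation, the sketch below shows how PRMU categories are commonly assigned under the (Boudin and Gallina, 2021) scheme, assuming Porter stemming via `nltk` as the README describes; the function names here are illustrative, and the dataset's own `prmu.py` remains the authoritative implementation.

```python
# Hypothetical sketch of PRMU assignment; the dataset's actual logic lives in prmu.py.
from nltk.stem import PorterStemmer

stemmer = PorterStemmer()

def contains(subseq, seq):
    """True if `subseq` occurs as a contiguous run inside `seq`."""
    n = len(subseq)
    return any(seq[i:i + n] == subseq for i in range(len(seq) - n + 1))

def prmu_category(kp_tokens, doc_tokens):
    """Classify one reference keyphrase against the stemmed source text."""
    kp = [stemmer.stem(t.lower()) for t in kp_tokens]
    doc = [stemmer.stem(t.lower()) for t in doc_tokens]
    if contains(kp, doc):
        return "P"   # Present: contiguous match after stemming
    found = [t for t in kp if t in doc]
    if len(found) == len(kp):
        return "R"   # Reordered: every word occurs, but not contiguously
    if found:
        return "M"   # Mixed: only some words occur
    return "U"       # Unseen: no word occurs
```

The % Present / % Reordered / % Mixed / % Unseen columns in the table above are then the per-split averages of these categories over all reference keyphrases.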
stats.ipynb
CHANGED
@@ -2,7 +2,7 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": …,
+   "execution_count": 1,
    "id": "eba2ee81",
    "metadata": {},
    "outputs": [
@@ -17,7 +17,7 @@
     {
      "data": {
       "application/vnd.jupyter.widget-view+json": {
-       "model_id": "…",
+       "model_id": "6b71557b1f2b4fb48282f7d5d4a4fe37",
        "version_major": 2,
        "version_minor": 0
       },
@@ -37,14 +37,14 @@
   },
   {
    "cell_type": "code",
-   "execution_count": …,
+   "execution_count": 2,
    "id": "4ba72244",
    "metadata": {},
    "outputs": [
     {
      "data": {
       "application/vnd.jupyter.widget-view+json": {
-       "model_id": "…",
+       "model_id": "5ece754125e04f6ca8856ab3d924eec6",
        "version_major": 2,
        "version_minor": 0
       },
@@ -61,16 +61,16 @@
      "text": [
       "statistics for train\n",
       "# keyphrases: 9.79\n",
-      "% P: …\n",
-      "% R: 9.…\n",
-      "% M: 6.…\n",
-      "% U: 5.…\n"
+      "% P: 78.00\n",
+      "% R: 9.85\n",
+      "% M: 6.22\n",
+      "% U: 5.93\n"
      ]
     },
     {
      "data": {
       "application/vnd.jupyter.widget-view+json": {
-       "model_id": "…",
+       "model_id": "f607af2e66a94ece874b8c5ebea207f4",
        "version_major": 2,
        "version_minor": 0
       },
@@ -87,16 +87,16 @@
      "text": [
       "statistics for validation\n",
       "# keyphrases: 9.15\n",
-      "% P: 77.…\n",
+      "% P: 77.96\n",
       "% R: 9.82\n",
-      "% M: 6.…\n",
-      "% U: 5.…\n"
+      "% M: 6.75\n",
+      "% U: 5.47\n"
      ]
     },
     {
      "data": {
       "application/vnd.jupyter.widget-view+json": {
-       "model_id": "…",
+       "model_id": "54e7f57783e74ca9b5f7815eb6413031",
        "version_major": 2,
        "version_minor": 0
       },
@@ -112,19 +112,17 @@
      "output_type": "stream",
      "text": [
       "statistics for test\n",
-      "# keyphrases: 9.…\n",
-      "% P: 78.…\n",
-      "% R: 9.…\n",
-      "% M: 6.…\n",
-      "% U: 4.…\n"
+      "# keyphrases: 9.82\n",
+      "% P: 78.70\n",
+      "% R: 9.92\n",
+      "% M: 6.48\n",
+      "% U: 4.91\n"
      ]
     }
    ],
    "source": [
     "from tqdm.notebook import tqdm\n",
     "\n",
-    "\n",
-    "\n",
     "for split in ['train', 'validation', 'test']:\n",
     "    \n",
     "    P, R, M, U, nb_kps = [], [], [], [], []\n",
@@ -144,10 +142,79 @@
     "    print(\"% U: {:.2f}\".format(sum(U)/len(U)*100))"
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "id": "4e08f80f",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import spacy\n",
+    "\n",
+    "nlp = spacy.load(\"en_core_web_sm\")\n",
+    "\n",
+    "# https://spacy.io/usage/linguistic-features#native-tokenizer-additions\n",
+    "\n",
+    "from spacy.lang.char_classes import ALPHA, ALPHA_LOWER, ALPHA_UPPER\n",
+    "from spacy.lang.char_classes import CONCAT_QUOTES, LIST_ELLIPSES, LIST_ICONS\n",
+    "from spacy.util import compile_infix_regex\n",
+    "\n",
+    "# Modify tokenizer infix patterns\n",
+    "infixes = (\n",
+    "    LIST_ELLIPSES\n",
+    "    + LIST_ICONS\n",
+    "    + [\n",
+    "        r\"(?<=[0-9])[+\\-\\*^](?=[0-9-])\",\n",
+    "        r\"(?<=[{al}{q}])\\.(?=[{au}{q}])\".format(\n",
+    "            al=ALPHA_LOWER, au=ALPHA_UPPER, q=CONCAT_QUOTES\n",
+    "        ),\n",
+    "        r\"(?<=[{a}]),(?=[{a}])\".format(a=ALPHA),\n",
+    "        # ✅ Commented out regex that splits on hyphens between letters:\n",
+    "        # r\"(?<=[{a}])(?:{h})(?=[{a}])\".format(a=ALPHA, h=HYPHENS),\n",
+    "        r\"(?<=[{a}0-9])[:<>=/](?=[{a}])\".format(a=ALPHA),\n",
+    "    ]\n",
+    ")\n",
+    "\n",
+    "infix_re = compile_infix_regex(infixes)\n",
+    "nlp.tokenizer.infix_finditer = infix_re.finditer"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "6f219825",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "application/vnd.jupyter.widget-view+json": {
+       "model_id": "cecaef36d18c43caa6c8386cb67cdd96",
+       "version_major": 2,
+       "version_minor": 0
+      },
+      "text/plain": [
+       "  0%|          | 0/1000 [00:00<?, ?it/s]"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "for split in ['train', 'validation', 'test']:\n",
+    "    doc_len = []\n",
+    "    for sample in tqdm(dataset[split]):\n",
+    "        doc_len.append(len(nlp(sample[\"title\"])) + len(nlp(sample[\"abstract\"])))\n",
+    "    \n",
+    "    print(\"statistics for {}\".format(split))\n",
+    "    print(\"avg doc len: {:.1f}\".format(sum(doc_len)/len(doc_len)))\n",
+    "    "
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "…",
+   "id": "9cdd2319",
    "metadata": {},
    "outputs": [],
    "source": []
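Two notes on the new notebook cells. The tokenizer cell follows the spaCy infix-customization recipe linked in its first comment, with the hyphen-splitting rule commented out so hyphenated words survive as single tokens; the `dataset` object iterated in the document-length cell is presumably created by an earlier `datasets.load_dataset` cell that this diff does not show. As a quick sanity check (not part of the commit), the hyphen behaviour can be verified like this:

```python
# Quick check (not in the commit); assumes the `nlp` pipeline configured
# in the tokenizer cell above. With the hyphen infix rule removed,
# hyphenated words are kept as single tokens, as the README promises.
print([t.text for t in nlp("a graph-based ranking model")])
# expected: ['a', 'graph-based', 'ranking', 'model']
```

The "avg doc len" figures printed by the document-length cell evidently populate the new "# words" column in the README table (141.7 / 132.2 / 134.8).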