Datasets:
Update files from the datasets library (from 1.4.0)
Release notes: https://github.com/huggingface/datasets/releases/tag/1.4.0
- README.md +21 -21
- dummy/psgs_w100.multiset.compressed.no_embeddings/0.0.0/dummy_data.zip +3 -0
- dummy/psgs_w100.multiset.exact.no_embeddings/0.0.0/dummy_data.zip +3 -0
- dummy/psgs_w100.multiset.no_index.no_embeddings/0.0.0/dummy_data.zip +3 -0
- dummy/psgs_w100.nq.compressed.no_embeddings/0.0.0/dummy_data.zip +3 -0
- dummy/psgs_w100.nq.exact.no_embeddings/0.0.0/dummy_data.zip +3 -0
- dummy/psgs_w100.nq.no_index.no_embeddings/0.0.0/dummy_data.zip +3 -0
- wiki_dpr.py +13 -8
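This update adds `no_embeddings` variants of each configuration (see the new dummy data files and the `with_embeddings` changes in `wiki_dpr.py` below). A minimal usage sketch, assuming the standard `datasets.load_dataset` API and the config naming produced by the updated script; fields other than `title` and `embeddings` are not shown in this diff and are assumed here:

```python
from datasets import load_dataset

# Sketch only: config names follow <wiki_split>.<embeddings_name>.<index_name>[.no_embeddings],
# e.g. the new "psgs_w100.nq.no_index.no_embeddings" skips both the FAISS index
# and the DPR embeddings column. Note: the full (non-dummy) data is very large.
passages = load_dataset("wiki_dpr", "psgs_w100.nq.no_index.no_embeddings", split="train")
print(passages.column_names)  # expected to include "title"; "id"/"text" are assumed here
```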
README.md
CHANGED
@@ -27,7 +27,7 @@
 - [Citation Information](#citation-information)
 - [Contributions](#contributions)
 
-##
+## Dataset Description
 
 - **Homepage:** [https://github.com/facebookresearch/DPR](https://github.com/facebookresearch/DPR)
 - **Repository:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
@@ -37,25 +37,25 @@
 - **Size of the generated dataset:** 448718.73 MB
 - **Total amount of disk used:** 932739.13 MB
 
-###
+### Dataset Summary
 
 This is the wikipedia split used to evaluate the Dense Passage Retrieval (DPR) model.
 It contains 21M passages from wikipedia along with their DPR embeddings.
 The wikipedia articles were split into multiple, disjoint text blocks of 100 words as passages.
 
-###
+### Supported Tasks
 
 [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
 
-###
+### Languages
 
 [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
 
-##
+## Dataset Structure
 
 We show detailed information for up to 5 configurations of the dataset.
 
-###
+### Data Instances
 
 #### psgs_w100.multiset.compressed
 
@@ -147,7 +147,7 @@ This example was too long and was cropped:
 }
 ```
 
-###
+### Data Fields
 
 The data fields are the same among all splits.
 
@@ -181,7 +181,7 @@ The data fields are the same among all splits.
 - `title`: a `string` feature.
 - `embeddings`: a `list` of `float32` features.
 
-###
+### Data Splits Sample Size
 
 | name | train |
 |-----------------------------|-------:|
@@ -191,49 +191,49 @@ The data fields are the same among all splits.
 |psgs_w100.nq.compressed |21015300|
 |psgs_w100.nq.exact |21015300|
 
-##
+## Dataset Creation
 
-###
+### Curation Rationale
 
 [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
 
-###
+### Source Data
 
 [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
 
-###
+### Annotations
 
 [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
 
-###
+### Personal and Sensitive Information
 
 [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
 
-##
+## Considerations for Using the Data
 
-###
+### Social Impact of Dataset
 
 [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
 
-###
+### Discussion of Biases
 
 [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
 
-###
+### Other Known Limitations
 
 [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
 
-##
+## Additional Information
 
-###
+### Dataset Curators
 
 [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
 
-###
+### Licensing Information
 
 [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
 
-###
+### Citation Information
 
 ```
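To make the Data Fields section above concrete, here is a hedged sketch of the feature schema it implies; `title` (string) and `embeddings` (list of float32) are listed explicitly in the card, while the `id` and `text` fields are assumptions not visible in the cropped diff:

```python
import datasets

# Hedged sketch of the wiki_dpr feature schema implied by the Data Fields section.
features = datasets.Features(
    {
        "id": datasets.Value("string"),        # assumed, not shown in the diff
        "text": datasets.Value("string"),      # assumed, not shown in the diff
        "title": datasets.Value("string"),
        "embeddings": datasets.Sequence(datasets.Value("float32")),
    }
)
print(features)
```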
dummy/psgs_w100.multiset.compressed.no_embeddings/0.0.0/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:973e7bafe41921f0a524cddeee39677bbfc9a012e38223c0b29a46541b7fc75c
+size 1433
dummy/psgs_w100.multiset.exact.no_embeddings/0.0.0/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8a7724c87f34b919b40ef12c24e740ff0dd78de2cb64a3f88218183918c3c025
+size 1413
dummy/psgs_w100.multiset.no_index.no_embeddings/0.0.0/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:603f7c7c74daae054293237889038a83e38d3004ca5ebf32fb2d9bd78f2defb9
+size 1121
dummy/psgs_w100.nq.compressed.no_embeddings/0.0.0/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eabb0f68f9976d93f2d9b6193c7f2def7b621b68ff841a44d9db8ead1162f60e
+size 1421
dummy/psgs_w100.nq.exact.no_embeddings/0.0.0/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5591de007662b92b4b204b38866015028eb556f500b92743b4258776a0c909da
+size 1401
dummy/psgs_w100.nq.no_index.no_embeddings/0.0.0/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5025ccb49a6473fcbcd27e1e3d8d1af886dccb8c264e99fae28a085324380505
+size 1121
wiki_dpr.py
CHANGED
@@ -1,4 +1,3 @@
-import logging
 import os
 
 import numpy as np
@@ -6,6 +5,9 @@ import numpy as np
 import datasets
 
 
+logger = datasets.logging.get_logger(__name__)
+
+
 _CITATION = """
 @misc{karpukhin2020dense,
     title={Dense Passage Retrieval for Open-Domain Question Answering},
@@ -60,13 +62,15 @@ class WikiDprConfig(datasets.BuilderConfig):
             **kwargs: keyword arguments forwarded to super.
         """
         self.with_embeddings = with_embeddings
-        self.with_index = with_index
+        self.with_index = with_index and index_name != "no_index"
         self.wiki_split = wiki_split
         self.embeddings_name = embeddings_name
         self.index_name = index_name if with_index else "no_index"
         self.index_train_size = index_train_size
         self.dummy = dummy
         name = [self.wiki_split, self.embeddings_name, self.index_name]
+        if not self.with_embeddings:
+            name.append("no_embeddings")
         if self.dummy:
             name = ["dummy"] + name
         assert (
@@ -92,10 +96,11 @@ class WikiDpr(datasets.GeneratorBasedBuilder):
     BUILDER_CONFIGS = [
         WikiDprConfig(
             embeddings_name=embeddings_name,
-
+            with_embeddings=with_embeddings,
             index_name=index_name,
             version=datasets.Version("0.0.0"),
         )
+        for with_embeddings in (True, False)
         for embeddings_name in ("nq", "multiset")
         for index_name in ("exact", "compressed", "no_index")
     ]
@@ -149,7 +154,7 @@ class WikiDpr(datasets.GeneratorBasedBuilder):
             if self.config.with_embeddings:
                 if vec_idx >= len(vecs):
                     if len(vectors_files) == 0:
-
+                        logger.warning("Ran out of vector files at index {}".format(i))
                         break
                     vecs = np.load(open(vectors_files.pop(0), "rb"), allow_pickle=True)
                     vec_idx = 0
@@ -192,12 +197,12 @@ class WikiDpr(datasets.GeneratorBasedBuilder):
 
             d = 768
            train_size = self.config.index_train_size
-
+            logger.info("Building wiki_dpr faiss index")
            if self.config.index_name == "exact":
-                index = faiss.
+                index = faiss.IndexHNSWSQ(d, faiss.ScalarQuantizer.QT_8bit, 128, faiss.METRIC_INNER_PRODUCT)
                index.hnsw.efConstruction = 200
                index.hnsw.efSearch = 128
-                dataset.add_faiss_index("embeddings", custom_index=index)
+                dataset.add_faiss_index("embeddings", custom_index=index, train_size=train_size)
            else:
                quantizer = faiss.IndexHNSWFlat(d, 128, faiss.METRIC_INNER_PRODUCT)
                quantizer.hnsw.efConstruction = 200
@@ -211,6 +216,6 @@ class WikiDpr(datasets.GeneratorBasedBuilder):
                    train_size=train_size,
                    custom_index=ivf_index,
                )
-
+            logger.info("Saving wiki_dpr faiss index")
            dataset.save_faiss_index("embeddings", index_file)
            return dataset
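The index-building code above attaches and saves a FAISS index over the `embeddings` column. A hedged sketch of querying that index after loading an indexed configuration; the random vector stands in for a real 768-dim DPR question embedding, and downloading the full (non-dummy) config is assumed to be feasible:

```python
import numpy as np
from datasets import load_dataset

# Sketch only: "psgs_w100.nq.exact" ships embeddings and a FAISS index named
# "embeddings" (see add_faiss_index / save_faiss_index in wiki_dpr.py above).
ds = load_dataset("wiki_dpr", "psgs_w100.nq.exact", split="train")

query = np.random.randn(768).astype("float32")  # stand-in for a DPR question embedding (d = 768)
scores, examples = ds.get_nearest_examples("embeddings", query, k=5)
print(scores)
print(examples["title"])
```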