iqtam committed
Commit c24b80b · verified · 1 Parent(s): c1dcd84

Upload folder using huggingface_hub

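The commit message says the folder was pushed with huggingface_hub. A minimal sketch of what such an upload might look like, assuming a local folder containing the files listed below; the local path and repo id are placeholders, not taken from this commit:

```python
from huggingface_hub import HfApi

api = HfApi()  # picks up the token from `huggingface-cli login` by default

api.upload_folder(
    folder_path="reddit_first_450000",    # placeholder local folder with the .arrow shards and JSON files
    repo_id="iqtam/reddit-first-450000",  # placeholder repo id; the actual repo is not shown in this view
    repo_type="dataset",
    commit_message="Upload folder using huggingface_hub",
)
```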
README.md ADDED
@@ -0,0 +1 @@
+ # reddit first 450000 dataset
data-00000-of-00004.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e4b09e1d90ce0a74101711233ccdb5a944d43ae35561a485e41855e0d7b3af68
+ size 426016032
data-00001-of-00004.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a5eaf73f50f43ddca57d3db898a6e3b8f3f5700b8e83b8cf26d7ac26754fced7
+ size 421498224
data-00002-of-00004.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:42c6fed161f94249c6d197cb5fa4e9e12dc4543d856e12b32284a14b265128a1
+ size 421795704
data-00003-of-00004.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e84e17399f60222e88a4db9b6f46e311f81781f4b34680f18b410c6e2929c5df
+ size 423563400
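The four .arrow shards are tracked with Git LFS, so the diff records only pointer files (spec version, sha256 oid, byte size) rather than the binary data. As a sketch, a downloaded shard could be checked against its pointer like this; the values are copied from the first pointer above, the helper name is ours:

```python
import hashlib
import os

def matches_lfs_pointer(path: str, expected_sha256: str, expected_size: int) -> bool:
    """Compare a local file against the oid/size recorded in its Git LFS pointer."""
    if os.path.getsize(path) != expected_size:
        return False
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest() == expected_sha256

# Values taken from the pointer of the first shard in this commit.
print(matches_lfs_pointer(
    "data-00000-of-00004.arrow",
    "e4b09e1d90ce0a74101711233ccdb5a944d43ae35561a485e41855e0d7b3af68",
    426016032,
))
```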
dataset_info.json ADDED
@@ -0,0 +1,106 @@
+ {
+   "builder_name": "tldr-17",
+   "citation": "\n@inproceedings{volske-etal-2017-tl,\n title = {TL;DR: Mining {R}eddit to Learn Automatic Summarization},\n author = {V{\"o}lske, Michael and Potthast, Martin and Syed, Shahbaz and Stein, Benno},\n booktitle = {Proceedings of the Workshop on New Frontiers in Summarization},\n month = {sep},\n year = {2017},\n address = {Copenhagen, Denmark},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W17-4508},\n doi = {10.18653/v1/W17-4508},\n pages = {59--63},\n abstract = {Recent advances in automatic text summarization have used deep neural networks to generate high-quality abstractive summaries, but the performance of these models strongly depends on large amounts of suitable training data. We propose a new method for mining social media for author-provided summaries, taking advantage of the common practice of appending a {``}TL;DR{''} to long posts. A case study using a large Reddit crawl yields the Webis-TLDR-17 dataset, complementing existing corpora primarily from the news genre. Our technique is likely applicable to other social media sites and general web crawls.},\n}\n",
+   "config_name": "default",
+   "dataset_name": "tldr-17",
+   "dataset_size": 18936201253,
+   "description": "\nThis corpus contains preprocessed posts from the Reddit dataset.\nThe dataset consists of 3,848,330 posts with an average length of 270 words for content,\nand 28 words for the summary.\n\nFeatures includes strings: author, body, normalizedBody, content, summary, subreddit, subreddit_id.\nContent is used as document and summary is used as summary.\n",
+   "download_checksums": {
+     "data/corpus-webis-tldr-17.zip": {
+       "num_bytes": 3141854161,
+       "checksum": null
+     }
+   },
+   "download_size": 3141854161,
+   "features": {
+     "author": {
+       "dtype": "string",
+       "_type": "Value"
+     },
+     "body": {
+       "dtype": "string",
+       "_type": "Value"
+     },
+     "normalizedBody": {
+       "dtype": "string",
+       "_type": "Value"
+     },
+     "subreddit": {
+       "dtype": "string",
+       "_type": "Value"
+     },
+     "subreddit_id": {
+       "dtype": "string",
+       "_type": "Value"
+     },
+     "id": {
+       "dtype": "string",
+       "_type": "Value"
+     },
+     "content": {
+       "dtype": "string",
+       "_type": "Value"
+     },
+     "summary": {
+       "dtype": "string",
+       "_type": "Value"
+     }
+   },
+   "homepage": "https://github.com/webis-de/webis-tldr-17-corpus",
+   "license": "",
+   "size_in_bytes": 22078055414,
+   "splits": {
+     "train": {
+       "name": "train",
+       "num_bytes": 18936201253,
+       "num_examples": 3848330,
+       "shard_lengths": [
+         133000,
+         134000,
+         133000,
+         133000,
+         133000,
+         133000,
+         133000,
+         133000,
+         133000,
+         133000,
+         134000,
+         132000,
+         133000,
+         134000,
+         133000,
+         133000,
+         107000,
+         84000,
+         82000,
+         79000,
+         85000,
+         82000,
+         81000,
+         81000,
+         76000,
+         76000,
+         78000,
+         76000,
+         76000,
+         77000,
+         78000,
+         77000,
+         75000,
+         72000,
+         73000,
+         74000,
+         72000,
+         57330
+       ],
+       "dataset_name": "tldr-17"
+     }
+   },
+   "version": {
+     "version_str": "1.0.0",
+     "major": 1,
+     "minor": 0,
+     "patch": 0
+   }
+ }
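dataset_info.json carries the metadata of the original tldr-17 builder (features, sizes, and the shard lengths of the full 3,848,330-example train split), not of the four shards saved in this folder. A small sketch for inspecting the file once it is downloaded locally:

```python
import json

with open("dataset_info.json") as f:
    info = json.load(f)

train = info["splits"]["train"]
print("features:", list(info["features"]))
print("num_examples:", train["num_examples"])
# The recorded per-shard lengths should add up to the total number of examples.
assert sum(train["shard_lengths"]) == train["num_examples"]
```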
state.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "_data_files": [
+     {
+       "filename": "data-00000-of-00004.arrow"
+     },
+     {
+       "filename": "data-00001-of-00004.arrow"
+     },
+     {
+       "filename": "data-00002-of-00004.arrow"
+     },
+     {
+       "filename": "data-00003-of-00004.arrow"
+     }
+   ],
+   "_fingerprint": "d80a3a13d3bc18f0",
+   "_format_columns": null,
+   "_format_kwargs": {},
+   "_format_type": null,
+   "_output_all_columns": false,
+   "_split": "train[:450000]"
+ }
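Taken together, the .arrow shards, dataset_info.json, and state.json are the layout produced by the datasets library's save_to_disk, and the "_split" field in state.json indicates the saved data is the first 450,000 rows of the tldr-17 train split. A minimal loading sketch, assuming the repo files sit at the root of the downloaded snapshot; the repo id is a placeholder:

```python
from datasets import load_from_disk
from huggingface_hub import snapshot_download

# Placeholder repo id; replace with the dataset repo that holds this folder.
local_dir = snapshot_download(repo_id="iqtam/reddit-first-450000", repo_type="dataset")

ds = load_from_disk(local_dir)
print(ds)                # should report 450000 rows with the tldr-17 columns
print(ds[0]["summary"])  # the author-written TL;DR of the first post
```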