nthngdy committed on
Commit 8d31e52 · 1 Parent(s): 45d6f29

Upload ccnews_split.py

Files changed (1)
  1. ccnews_split.py +140 -0
ccnews_split.py ADDED
@@ -0,0 +1,140 @@
+ # coding=utf-8
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """The CC-News dataset is based on the Common Crawl News Dataset by Sebastian Nagel."""
+
+ import json
+ import os
+ import tarfile
+ from fnmatch import fnmatch
+
+ import datasets
+
+
+ def custom_iter_archive(path_or_buf):
+     # Yield (file_path, file_object) pairs for every regular, non-hidden file in a tar archive.
+     def _iter_archive(f):
+         stream = tarfile.open(fileobj=f, mode="r|*")
+         for tarinfo in stream:
+             file_path = tarinfo.name
+             if not tarinfo.isreg():
+                 continue
+             if file_path is None:
+                 continue
+             if os.path.basename(file_path).startswith(".") or os.path.basename(file_path).startswith("__"):
+                 # skipping hidden files
+                 continue
+             file_obj = stream.extractfile(tarinfo)
+             yield file_path, file_obj
+             stream.members = []  # free already-read members to keep memory bounded
+         del stream
+
+     if hasattr(path_or_buf, "read"):
+         yield from _iter_archive(path_or_buf)
+     else:
+         with open(path_or_buf, "rb") as f:
+             yield from _iter_archive(f)
+
+ logger = datasets.logging.get_logger(__name__)
+
+
+ _DESCRIPTION = """\
+ CC-News contains news articles from news sites all over the world. \
+ The data is available on AWS S3 in the Common Crawl bucket at /crawl-data/CC-NEWS/. \
+ This version of the dataset has 708241 articles. It represents a small portion of the \
+ English-language subset of CC-News, created using news-please (Hamborg et al., 2017) to \
+ collect and extract the English-language portion of CC-News.
+ """
+
+ _CITATION = """\
+ @InProceedings{Hamborg2017,
+   author = {Hamborg, Felix and Meuschke, Norman and Breitinger, Corinna and Gipp, Bela},
+   title = {news-please: A Generic News Crawler and Extractor},
+   year = {2017},
+   booktitle = {Proceedings of the 15th International Symposium of Information Science},
+   location = {Berlin},
+   doi = {10.5281/zenodo.4120316},
+   pages = {218--223},
+   month = {March}
+ }
+ """
+ _PROJECT_URL = "https://commoncrawl.org/2016/10/news-dataset-available/"
+ _DOWNLOAD_URL = "https://storage.googleapis.com/huggingface-nlp/datasets/cc_news/cc_news.tar.gz"
+
+
+ class CCNewsConfig(datasets.BuilderConfig):
+     """BuilderConfig for CCNews."""
+
+     def __init__(self, **kwargs):
+         """BuilderConfig for CCNews.
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(CCNewsConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
+
+
+ class CCNews(datasets.GeneratorBasedBuilder):
+     """CC-News dataset."""
+
+     BUILDER_CONFIGS = [
+         CCNewsConfig(
+             name="plain_text",
+             description="Plain text",
+         )
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "title": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                     "domain": datasets.Value("string"),
+                     "date": datasets.Value("string"),
+                     "description": datasets.Value("string"),
+                     "url": datasets.Value("string"),
+                     "image_url": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=None,
+             homepage=_PROJECT_URL,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         archive = dl_manager.download(_DOWNLOAD_URL)
+
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": dl_manager.iter_archive(archive)}),
+         ]
+
+     def _generate_examples(self, files):
+         id_ = 0
+         for article_file_path, f in files:
+             if fnmatch(os.path.basename(article_file_path), "*.json"):
+                 article = json.load(f)
+                 yield id_, {
+                     "title": article["title"].strip() if article["title"] is not None else "",
+                     "text": article["maintext"].strip() if article["maintext"] is not None else "",
+                     "domain": article["source_domain"].strip() if article["source_domain"] is not None else "",
+                     "date": article["date_publish"].strip() if article["date_publish"] is not None else "",
+                     "description": article["description"].strip() if article["description"] is not None else "",
+                     "url": article["url"].strip() if article["url"] is not None else "",
+                     "image_url": article["image_url"].strip() if article["image_url"] is not None else "",
+                 }
+                 id_ += 1
+
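For context, a minimal usage sketch, not part of the committed file: it assumes the script is saved locally as ccnews_split.py, that the download URL above is reachable, and a datasets version that still supports loading from a local script.

from datasets import load_dataset

# Load the CCNews builder defined in the script above (hypothetical local path).
dataset = load_dataset("./ccnews_split.py", split="train")

# Each record carries the fields declared in _info().
example = dataset[0]
print(example["title"], example["url"])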