# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""The CC-News dataset is based on Common Crawl News Dataset by Sebastian Nagel"""

import json
import os
import tarfile
from fnmatch import fnmatch

import datasets


def custom_iter_archive(path_or_buf, _filter=lambda x: True):
    def _iter_archive(f):
        stream = tarfile.open(fileobj=f, mode="r|*")
        for i, tarinfo in enumerate(stream):
            if not _filter(i):
                continue
            file_path = tarinfo.name
            if not tarinfo.isreg():
                continue
            if file_path is None:
                continue
            if os.path.basename(file_path).startswith(".") or os.path.basename(file_path).startswith("__"):
                # skipping hidden files
                continue
            file_obj = stream.extractfile(tarinfo)
            yield file_path, file_obj
            stream.members = []
        del stream

    if hasattr(path_or_buf, "read"):
        yield from _iter_archive(path_or_buf)
    else:
        with open(path_or_buf, "rb") as f:
            yield from _iter_archive(f)
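

# Illustrative only: the `_filter` callback receives each member's index in the tar stream,
# so callers can select deterministic subsets without unpacking the archive more than once.
# The archive path below is a hypothetical local file:
#
#   for file_path, file_obj in custom_iter_archive("cc_news.tar.gz", lambda i: i % 10 < 8):
#       article = json.load(file_obj)
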
logger = datasets.logging.get_logger(__name__)

_DESCRIPTION = """\
CC-News contains news articles from news sites all over the world. \
The data is available on AWS S3 in the Common Crawl bucket at /crawl-data/CC-NEWS/. \
This version of the dataset has 708,241 articles and represents a small portion of the \
English-language subset of CC-News, collected and extracted using news-please (Hamborg et al., 2017).
"""

_CITATION = """\
@InProceedings{Hamborg2017,
  author    = {Hamborg, Felix and Meuschke, Norman and Breitinger, Corinna and Gipp, Bela},
  title     = {news-please: A Generic News Crawler and Extractor},
  year      = {2017},
  booktitle = {Proceedings of the 15th International Symposium of Information Science},
  location  = {Berlin},
  doi       = {10.5281/zenodo.4120316},
  pages     = {218--223},
  month     = {March}
}
"""
_PROJECT_URL = "https://commoncrawl.org/2016/10/news-dataset-available/"
_DOWNLOAD_URL = "https://storage.googleapis.com/huggingface-nlp/datasets/cc_news/cc_news.tar.gz"


class CCNewsConfig(datasets.BuilderConfig):
    """BuilderConfig for CCNews."""

    def __init__(self, **kwargs):
        """BuilderConfig for CCNews.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(CCNewsConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)


class CCNews(datasets.GeneratorBasedBuilder):
    """CC-News dataset."""

    BUILDER_CONFIGS = [
        CCNewsConfig(
            name="plain_text",
            description="Plain text",
        ),
        CCNewsConfig(
            name="plain_text_sentences",
            description="Plain text (sentence level)",
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_PROJECT_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        archive = dl_manager.download(_DOWNLOAD_URL)

        # Deterministic 80/10/10 train/validation/test split keyed on each file's
        # position in the tar stream.
        train_filter = lambda x: (x % 10) < 8
        val_filter = lambda x: (x % 10) == 8
        test_filter = lambda x: (x % 10) == 9

        level = "doc" if self.config.name == "plain_text" else "sentence"
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"files": custom_iter_archive(archive, train_filter), "level": level},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"files": custom_iter_archive(archive, val_filter), "level": level},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"files": custom_iter_archive(archive, test_filter), "level": level},
            ),
        ]
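
    # Each archive member is a news-please JSON record; only its "maintext" field is read.
    # With the "plain_text_sentences" config, every newline-separated line of the article
    # body becomes its own example.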
    def _generate_examples(self, files, level):
        id_ = 0
        for article_file_path, f in files:
            if fnmatch(os.path.basename(article_file_path), "*.json"):
                article = json.load(f)
                if level == "sentence":
                    full_article = article["maintext"].strip() if article["maintext"] is not None else ""
                    for sent in full_article.split("\n"):
                        yield id_, {"text": sent}
                        id_ += 1
                else:
                    yield id_, {
                        "text": article["maintext"].strip() if article["maintext"] is not None else "",
                    }
                    id_ += 1
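

# Minimal usage sketch (the local script path below is an assumption for illustration;
# adjust it to wherever this file is saved):
#
#   from datasets import load_dataset
#
#   docs = load_dataset("./cc_news.py", "plain_text", split="train")
#   sentences = load_dataset("./cc_news.py", "plain_text_sentences", split="validation")
#   print(docs[0]["text"])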