# amazon/amazon.py
# Author: Yijun Xiao
# Initial script (commit 6f241aa)
import csv
import os
import datasets
# Module-level logger namespaced to this loading script (standard `datasets` pattern).
logger = datasets.logging.get_logger(__name__)
# Human-readable dataset description surfaced via DatasetInfo (currently empty).
_DESCRIPTION = """
"""
# Google Drive direct-download links to the per-category review archives.
_URLS = {
    "clothing": "https://drive.google.com/u/0/uc?id=1HP3EPX9Q8JffUUZz2czXD7qudzvitscq&export=download",
    "electronics": "https://drive.google.com/u/0/uc?id=1W50FNd0707qK1CCktEF30nlDqsImLg3X&export=download",
    "office": "https://drive.google.com/u/0/uc?id=1lsttnBIjFD4nQw9idZYQNUWKSzj5VibD&export=download",
}
# Column order of the monthly review CSVs. Passed as explicit fieldnames to
# csv.DictReader, which implies the files carry no header row — TODO confirm.
_FIELDS = ["date", "rating", "reviewText", "summary"]
# Ordered star-rating labels; used as the default ClassLabel names.
_RATINGS = ["1", "2", "3", "4", "5"]
class AmazonConfig(datasets.BuilderConfig):
    """BuilderConfig for the time-shifted Amazon review datasets.

    Args:
        training_files: CSV filenames (one per month) forming the train split.
        testing_files: CSV filenames forming the test split.
        url: Download URL of the archive holding the category's CSV files.
        label_classes: Ordered class-label names for the rating field.
            Defaults to the five star ratings ``"1"``–``"5"``.
        **kwargs: Forwarded to ``datasets.BuilderConfig`` (e.g. ``name``,
            ``description``).
    """

    def __init__(
        self,
        training_files,
        testing_files,
        url,
        label_classes=_RATINGS,
        **kwargs,
    ):
        super().__init__(version=datasets.Version("1.0.0", ""), **kwargs)
        # Copy defensively: the default is a shared module-level list, and
        # storing it by reference would alias it across every config instance.
        self.label_classes = list(label_classes)
        self.training_files = training_files
        self.testing_files = testing_files
        self.url = url
def _month_files(start, count):
    """Return ``count`` consecutive monthly CSV filenames ("YYYYMM.csv").

    Args:
        start: First month as a ``"YYYYMM"`` string, e.g. ``"201011"``.
        count: Number of consecutive months to generate.

    Returns:
        List of filenames, e.g. ``["201011.csv", "201012.csv", "201101.csv", ...]``.
    """
    year, month = int(start[:4]), int(start[4:])
    names = []
    for _ in range(count):
        names.append(f"{year:04d}{month:02d}.csv")
        month += 1
        if month > 12:  # roll over December -> January of the next year
            month, year = 1, year + 1
    return names


class Amazon(datasets.GeneratorBasedBuilder):
    """Amazon product-review builder with temporally shifted train/test splits.

    Each config names a product category plus a "major shift" period: the
    train split covers 24 consecutive months of reviews and the test split
    covers the 6 months immediately following them.
    """

    BUILDER_CONFIGS = [
        AmazonConfig(
            name="clothing_majorshift01",
            description="",
            url=_URLS["clothing"],
            # Train 2010-11 .. 2012-10; test 2012-11 .. 2013-04.
            training_files=_month_files("201011", 24),
            testing_files=_month_files("201211", 6),
        ),
        AmazonConfig(
            name="clothing_majorshift02",
            description="",
            url=_URLS["clothing"],
            # Train 2008-08 .. 2010-07; test 2010-08 .. 2011-01.
            training_files=_month_files("200808", 24),
            testing_files=_month_files("201008", 6),
        ),
        AmazonConfig(
            name="clothing_majorshift03",
            description="",
            url=_URLS["clothing"],
            # Train 2016-02 .. 2018-01; test 2018-02 .. 2018-07.
            training_files=_month_files("201602", 24),
            testing_files=_month_files("201802", 6),
        ),
    ]

    def _info(self):
        """Describe the example schema: date/id/label(ClassLabel)/text."""
        features = {
            "date": datasets.Value("string"),
            "id": datasets.Value("int32"),
            "label": datasets.features.ClassLabel(names=self.config.label_classes),
            "text": datasets.Value("string"),
        }
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features),
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive and map configs' filenames to paths.

        The archive is expected to contain one directory per category, named
        after the part of the config name before the first underscore.
        """
        dirname = dl_manager.download_and_extract(self.config.url)
        logger.info(str(dirname))
        # Config names look like "<category>_majorshiftNN"; the category is
        # also the subdirectory inside the extracted archive.
        category = self.config.name.split("_")[0]
        train_filepaths = tuple(
            os.path.join(dirname, category, fname)
            for fname in self.config.training_files
        )
        test_filepaths = tuple(
            os.path.join(dirname, category, fname)
            for fname in self.config.testing_files
        )
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepaths": train_filepaths},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepaths": test_filepaths},
            ),
        ]

    def _generate_examples(self, filepaths):
        """Yield ``(key, example)`` pairs from the monthly CSVs in order.

        The key (also stored as the ``id`` field) is a running index across
        all files, so it is unique within a split.
        """
        logger.info(f"generating examples from {len(filepaths)} files")
        idx = 0
        for filepath in filepaths:
            with open(filepath, encoding="utf-8") as f:
                # Explicit fieldnames => the CSVs are assumed to have no
                # header row — TODO confirm against the archives; a header
                # would be emitted as a (mislabeled) data row.
                reader = csv.DictReader(f, fieldnames=_FIELDS)
                for row in reader:
                    yield idx, {
                        "date": row["date"],
                        "id": idx,
                        "label": row["rating"],
                        "text": row["reviewText"],
                    }
                    idx += 1