parquet-converter committed on
Commit
81795ce
·
1 Parent(s): 6f241aa

Update parquet files

Browse files
.gitattributes DELETED
@@ -1,27 +0,0 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bin.* filter=lfs diff=lfs merge=lfs -text
5
- *.bz2 filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.model filter=lfs diff=lfs merge=lfs -text
12
- *.msgpack filter=lfs diff=lfs merge=lfs -text
13
- *.onnx filter=lfs diff=lfs merge=lfs -text
14
- *.ot filter=lfs diff=lfs merge=lfs -text
15
- *.parquet filter=lfs diff=lfs merge=lfs -text
16
- *.pb filter=lfs diff=lfs merge=lfs -text
17
- *.pt filter=lfs diff=lfs merge=lfs -text
18
- *.pth filter=lfs diff=lfs merge=lfs -text
19
- *.rar filter=lfs diff=lfs merge=lfs -text
20
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
21
- *.tar.* filter=lfs diff=lfs merge=lfs -text
22
- *.tflite filter=lfs diff=lfs merge=lfs -text
23
- *.tgz filter=lfs diff=lfs merge=lfs -text
24
- *.xz filter=lfs diff=lfs merge=lfs -text
25
- *.zip filter=lfs diff=lfs merge=lfs -text
26
- *.zstandard filter=lfs diff=lfs merge=lfs -text
27
- *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
amazon.py DELETED
@@ -1,208 +0,0 @@
1
- import csv
2
- import os
3
-
4
- import datasets
5
-
6
- logger = datasets.logging.get_logger(__name__)
7
-
8
- _DESCRIPTION = """
9
- """
10
-
11
- _URLS = {
12
- "clothing": "https://drive.google.com/u/0/uc?id=1HP3EPX9Q8JffUUZz2czXD7qudzvitscq&export=download",
13
- "electronics": "https://drive.google.com/u/0/uc?id=1W50FNd0707qK1CCktEF30nlDqsImLg3X&export=download",
14
- "office": "https://drive.google.com/u/0/uc?id=1lsttnBIjFD4nQw9idZYQNUWKSzj5VibD&export=download",
15
- }
16
-
17
- _FIELDS = ["date", "rating", "reviewText", "summary"]
18
- _RATINGS = ["1", "2", "3", "4", "5"]
19
-
20
-
21
- class AmazonConfig(datasets.BuilderConfig):
22
- def __init__(
23
- self,
24
- training_files,
25
- testing_files,
26
- url,
27
- label_classes=_RATINGS,
28
- **kwargs,
29
- ):
30
- super().__init__(version=datasets.Version("1.0.0", ""), **kwargs)
31
- self.label_classes = label_classes
32
- self.training_files = training_files
33
- self.testing_files = testing_files
34
- self.url = url
35
-
36
-
37
- class Amazon(datasets.GeneratorBasedBuilder):
38
- BUILDER_CONFIGS = [
39
- AmazonConfig(
40
- name="clothing_majorshift01",
41
- description="",
42
- url=_URLS["clothing"],
43
- training_files=[
44
- "201011.csv",
45
- "201012.csv",
46
- "201101.csv",
47
- "201102.csv",
48
- "201103.csv",
49
- "201104.csv",
50
- "201105.csv",
51
- "201106.csv",
52
- "201107.csv",
53
- "201108.csv",
54
- "201109.csv",
55
- "201110.csv",
56
- "201111.csv",
57
- "201112.csv",
58
- "201201.csv",
59
- "201202.csv",
60
- "201203.csv",
61
- "201204.csv",
62
- "201205.csv",
63
- "201206.csv",
64
- "201207.csv",
65
- "201208.csv",
66
- "201209.csv",
67
- "201210.csv",
68
- ],
69
- testing_files=[
70
- "201211.csv",
71
- "201212.csv",
72
- "201301.csv",
73
- "201302.csv",
74
- "201303.csv",
75
- "201304.csv",
76
- ],
77
- ),
78
- AmazonConfig(
79
- name="clothing_majorshift02",
80
- description="",
81
- url=_URLS["clothing"],
82
- training_files=[
83
- "200808.csv",
84
- "200809.csv",
85
- "200810.csv",
86
- "200811.csv",
87
- "200812.csv",
88
- "200901.csv",
89
- "200902.csv",
90
- "200903.csv",
91
- "200904.csv",
92
- "200905.csv",
93
- "200906.csv",
94
- "200907.csv",
95
- "200908.csv",
96
- "200909.csv",
97
- "200910.csv",
98
- "200911.csv",
99
- "200912.csv",
100
- "201001.csv",
101
- "201002.csv",
102
- "201003.csv",
103
- "201004.csv",
104
- "201005.csv",
105
- "201006.csv",
106
- "201007.csv",
107
- ],
108
- testing_files=[
109
- "201008.csv",
110
- "201009.csv",
111
- "201010.csv",
112
- "201011.csv",
113
- "201012.csv",
114
- "201101.csv",
115
- ],
116
- ),
117
- AmazonConfig(
118
- name="clothing_majorshift03",
119
- description="",
120
- url=_URLS["clothing"],
121
- training_files=[
122
- "201602.csv",
123
- "201603.csv",
124
- "201604.csv",
125
- "201605.csv",
126
- "201606.csv",
127
- "201607.csv",
128
- "201608.csv",
129
- "201609.csv",
130
- "201610.csv",
131
- "201611.csv",
132
- "201612.csv",
133
- "201701.csv",
134
- "201702.csv",
135
- "201703.csv",
136
- "201704.csv",
137
- "201705.csv",
138
- "201706.csv",
139
- "201707.csv",
140
- "201708.csv",
141
- "201709.csv",
142
- "201710.csv",
143
- "201711.csv",
144
- "201712.csv",
145
- "201801.csv",
146
- ],
147
- testing_files=[
148
- "201802.csv",
149
- "201803.csv",
150
- "201804.csv",
151
- "201805.csv",
152
- "201806.csv",
153
- "201807.csv",
154
- ],
155
- ),
156
- ]
157
-
158
- def _info(self):
159
- features = {
160
- "date": datasets.Value("string"),
161
- "id": datasets.Value("int32"),
162
- "label": datasets.features.ClassLabel(names=self.config.label_classes),
163
- "text": datasets.Value("string"),
164
- }
165
- return datasets.DatasetInfo(
166
- description=_DESCRIPTION,
167
- features=datasets.Features(features),
168
- )
169
-
170
- def _split_generators(self, dl_manager):
171
- dirname = dl_manager.download_and_extract(self.config.url)
172
- logger.info(str(dirname))
173
- category = self.config.name.split("_")[
174
- 0
175
- ] # extract category name from the config
176
- train_filepaths = tuple(
177
- os.path.join(dirname, category, fname)
178
- for fname in self.config.training_files
179
- )
180
- test_filepaths = tuple(
181
- os.path.join(dirname, category, fname)
182
- for fname in self.config.testing_files
183
- )
184
- return [
185
- datasets.SplitGenerator(
186
- name=datasets.Split.TRAIN,
187
- gen_kwargs={"filepaths": train_filepaths},
188
- ),
189
- datasets.SplitGenerator(
190
- name=datasets.Split.TEST,
191
- gen_kwargs={"filepaths": test_filepaths},
192
- ),
193
- ]
194
-
195
- def _generate_examples(self, filepaths):
196
- logger.info(f"generating examples from {len(filepaths)} files")
197
- idx = 0
198
- for filepath in filepaths:
199
- with open(filepath, encoding="utf-8") as f:
200
- reader = csv.DictReader(f, fieldnames=_FIELDS)
201
- for row in reader:
202
- yield idx, {
203
- "date": row["date"],
204
- "id": idx,
205
- "label": row["rating"],
206
- "text": row["reviewText"],
207
- }
208
- idx += 1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
clothing_majorshift01/amazon-test.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e3cbb58c9e2e05dd2f882b9ae4f15c31aae5035aae8c743de0227428a4bdb9d6
3
+ size 42185042
clothing_majorshift01/amazon-train.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4673dbae79f2666bb8ac03eace041b8e8ab8926a5d8714bb2454f6098091e401
3
+ size 37040237
clothing_majorshift02/amazon-test.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9a302c306732aff4768134c268fc2df611afa44bdf2e8d897f8e9b1219ff92c6
3
+ size 4043072
clothing_majorshift02/amazon-train.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:70e33cd66a4ec8ec8e6a58c2b9b30ed659270fda183eb14696c5776fd1e553ac
3
+ size 6615564
clothing_majorshift03/amazon-test.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:106f8dd1fca89a01f7bd99644f86591b8379c33cac35fac191317814b4191604
3
+ size 61768861
clothing_majorshift03/amazon-train-00000-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:139dd0ff04ec6fbd7d4bcaf6a07b94926434f8e54fa4ae5f70aa1fd0c639f24b
3
+ size 282557949
clothing_majorshift03/amazon-train-00001-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:86751a51fc79dfc924a19bd0d135a9eef150d51939d8a7101263e031202a07a9
3
+ size 268933786