mstz committed
Commit 5edfd91 · 1 Parent(s): 1f2803b

Upload covertype.py

Files changed (1):
  covertype.py  +91 -69

covertype.py CHANGED
@@ -19,59 +19,61 @@ urls_per_split = {
     "train": "https://archive.ics.uci.edu/ml/machine-learning-databases/covtype/covtype.data.gz"
 }
 _BASE_FEATURE_NAMES = [
-    "elevation",
-    "aspect",
-    "slope",
-    "horizontal_distance_to_hydrology",
-    "vertical_distance_to_hydrology",
-    "horizontal_distance_to_roadways",
-    "hillshade_9am",
-    "hillshade_noon",
-    "hillshade_3pm",
-    "horizontal_distance_to_fire_points",
-    "is_a_wilderness_area",
-    "soil_type_id_0",
-    "soil_type_id_1",
-    "soil_type_id_2",
-    "soil_type_id_3",
-    "soil_type_id_4",
-    "soil_type_id_5",
-    "soil_type_id_6",
-    "soil_type_id_7",
-    "soil_type_id_8",
-    "soil_type_id_9",
-    "soil_type_id_10",
-    "soil_type_id_11",
-    "soil_type_id_12",
-    "soil_type_id_13",
-    "soil_type_id_14",
-    "soil_type_id_15",
-    "soil_type_id_16",
-    "soil_type_id_17",
-    "soil_type_id_18",
-    "soil_type_id_19",
-    "soil_type_id_20",
-    "soil_type_id_21",
-    "soil_type_id_22",
-    "soil_type_id_23",
-    "soil_type_id_24",
-    "soil_type_id_25",
-    "soil_type_id_26",
-    "soil_type_id_27",
-    "soil_type_id_28",
-    "soil_type_id_29",
-    "soil_type_id_30",
-    "soil_type_id_31",
-    "soil_type_id_32",
-    "soil_type_id_33",
-    "soil_type_id_34",
-    "soil_type_id_35",
-    "soil_type_id_36",
-    "soil_type_id_37",
-    "soil_type_id_38",
-    "soil_type_id_39",
-    "soil_type",
-    "cover_type"
+    "elevation",
+    "aspect",
+    "slope",
+    "horizontal_distance_to_hydrology",
+    "vertical_distance_to_hydrology",
+    "horizontal_distance_to_roadways",
+    "hillshade_9am",
+    "hillshade_noon",
+    "hillshade_3pm",
+    "horizontal_distance_to_fire_points",
+    "wilderness_area_id_0",
+    "wilderness_area_id_1",
+    "wilderness_area_id_2",
+    "wilderness_area_id_3",
+    "soil_type_id_0",
+    "soil_type_id_1",
+    "soil_type_id_2",
+    "soil_type_id_3",
+    "soil_type_id_4",
+    "soil_type_id_5",
+    "soil_type_id_6",
+    "soil_type_id_7",
+    "soil_type_id_8",
+    "soil_type_id_9",
+    "soil_type_id_10",
+    "soil_type_id_11",
+    "soil_type_id_12",
+    "soil_type_id_13",
+    "soil_type_id_14",
+    "soil_type_id_15",
+    "soil_type_id_16",
+    "soil_type_id_17",
+    "soil_type_id_18",
+    "soil_type_id_19",
+    "soil_type_id_20",
+    "soil_type_id_21",
+    "soil_type_id_22",
+    "soil_type_id_23",
+    "soil_type_id_24",
+    "soil_type_id_25",
+    "soil_type_id_26",
+    "soil_type_id_27",
+    "soil_type_id_28",
+    "soil_type_id_29",
+    "soil_type_id_30",
+    "soil_type_id_31",
+    "soil_type_id_32",
+    "soil_type_id_33",
+    "soil_type_id_34",
+    "soil_type_id_35",
+    "soil_type_id_36",
+    "soil_type_id_37",
+    "soil_type_id_38",
+    "soil_type_id_39",
+    "cover_type"
 ]
 features_types_per_config = {
     "covertype": {
@@ -85,7 +87,10 @@ features_types_per_config = {
         "hillshade_noon": datasets.Value("float32"),
         "hillshade_3pm": datasets.Value("float32"),
         "horizontal_distance_to_fire_points": datasets.Value("float32"),
-        "is_a_wilderness_area": datasets.Value("bool"),
+        "wilderness_area_id_0": datasets.Value("bool"),
+        "wilderness_area_id_1": datasets.Value("bool"),
+        "wilderness_area_id_2": datasets.Value("bool"),
+        "wilderness_area_id_3": datasets.Value("bool"),
         "soil_type_id_0": datasets.Value("bool"),
         "soil_type_id_1": datasets.Value("bool"),
         "soil_type_id_2": datasets.Value("bool"),
@@ -135,8 +140,11 @@ features_per_config = {k: datasets.Features(features_types_per_config[k]) for k
 
 
 class CovertypeConfig(datasets.BuilderConfig):
-    def __init__(self, **kwargs):
-        super(CovertypeConfig, self).__init__(version=VERSION, **kwargs)
+    def __init__(self, **kwargs):
+        super(CovertypeConfig, self).__init__(version=VERSION, **kwargs)
         self.features = features_per_config[kwargs["name"]]
 
 
@@ -144,7 +152,7 @@ class Covertype(datasets.GeneratorBasedBuilder):
     # dataset versions
     DEFAULT_CONFIG = "covertype"
     BUILDER_CONFIGS = [
-        CovertypeConfig(name="covertype",
+        CovertypeConfig(name="covertype",
                         description="Covertype for multiclass classification.")
     ]
 
@@ -153,35 +161,49 @@ class Covertype(datasets.GeneratorBasedBuilder):
         if self.config.name not in features_per_config:
             raise ValueError(f"Unknown configuration: {self.config.name}")
 
-        info = datasets.DatasetInfo(description=DESCRIPTION, citation=_CITATION, homepage=_HOMEPAGE,
+        info = datasets.DatasetInfo(description=DESCRIPTION, citation=_CITATION, homepage=_HOMEPAGE,
                                     features=features_per_config[self.config.name])
 
         return info
 
-    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
         downloads = dl_manager.download_and_extract(urls_per_split)
 
         return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloads["train"]})
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloads["train"]})
         ]
 
-    def _generate_examples(self, filepath: str):
+    def _generate_examples(self, filepath: str):
         # try:
         #     with gzip.open(filepath) as log:
-        #         data = pandas.read_csv(log, header=None)
+        #         data = pandas.read_csv(log, header=None)
         # except gzip.BadGzipFile:
-        data = pandas.read_csv(filepath, header=None)
+        data = pandas.read_csv(filepath, header=None)
         print(data.columns)
-        print(data.shape[1], len(_BASE_FEATURE_NAMES))
+        print(data.shape[1], len(_BASE_FEATURE_NAMES))
         data.columns = _BASE_FEATURE_NAMES
-        data = self.preprocess(data, config=self.config.name)
+        data = self.preprocess(data, config=self.config.name)
 
-        for row_id, row in data.iterrows():
+        for row_id, row in data.iterrows():
             data_row = dict(row)
 
-            yield row_id, data_row
+            yield row_id, data_row
 
-    def preprocess(self, data: pandas.DataFrame, config: str = DEFAULT_CONFIG) -> pandas.DataFrame:
-        data.loc[:, "cover_type"] = data["cover_type"].apply(lambda x: x - 1)
+    def preprocess(self, data: pandas.DataFrame, config: str = DEFAULT_CONFIG) -> pandas.DataFrame:
+        data.loc[:, "cover_type"] = data["cover_type"].apply(lambda x: x - 1)
 
         return data
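
For reference, a minimal usage sketch of the loader this commit uploads. It is not part of the commit: the repo id "mstz/covertype" below is an assumed location for this covertype.py, and trust_remote_code only matters on datasets versions that gate script-based loaders.

# Hypothetical usage of the covertype.py loading script above.
from datasets import load_dataset

# "mstz/covertype" is an assumed repo id; point it at wherever covertype.py is hosted.
covertype = load_dataset("mstz/covertype", "covertype", split="train", trust_remote_code=True)

print(covertype.features)            # wilderness_area_id_0..3 and soil_type_id_0..39 are booleans
print(covertype[0]["cover_type"])    # preprocess() shifts the 1-based UCI labels down to start at 0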