Commit 83d1a10 (verified) · omshinde committed · 1 Parent(s): 56e2a90

File for loading data with the HF datasets load_dataset() module

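For context, a minimal usage sketch (not part of the commit): with the WINDSET repository cloned and the working directory at the repo root (the script resolves its data under os.getcwd()), the builder can be loaded by pointing load_dataset() at the script. The local path below is an assumption, and trust_remote_code may or may not be required depending on the installed datasets version.

    from datasets import load_dataset

    # Assumed local path to the builder script inside a clone of
    # nasa-impact/WINDSET; adjust to your checkout location.
    dataset = load_dataset(
        "long_term_precipitation_forecast/dataset.py",
        trust_remote_code=True,  # may be required by recent datasets releases
    )

    # Each example is a lightweight pointer to one daily NetCDF file.
    print(dataset["train"][0])
    # {'file_path': '...', 'year': '...', 'subfolder': '...'}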
long_term_precipitation_forecast/dataset.py ADDED
@@ -0,0 +1,99 @@
import os
import datasets


class LongTermPrecipitationDataset(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """
        Defines the dataset metadata and feature structure.
        """
        return datasets.DatasetInfo(
            description="Dataset containing .nc files per year for variables.",
            features=datasets.Features({
                "file_path": datasets.Value("string"),  # Store file paths
                "year": datasets.Value("string"),  # Track year
                "subfolder": datasets.Value("string"),  # Track subfolder (sf1, sf2)
            }),
            supervised_keys=None,  # Update if supervised task is defined
            homepage="https://huggingface.co/datasets/nasa-impact/WINDSET/tree/main/long_term_precipitation_forecast",
            license="MIT",
        )

    def _split_generators(self, dl_manager):
        """
        Define the dataset splits for train, validation, and test.
        """
        # Define the directory containing the dataset
        data_dir = os.path.join(os.getcwd(), "long_term_precipitation_forecast")

        # Get the directories for each split
        train_dir = os.path.join(data_dir, "training_data")
        validation_dir = os.path.join(data_dir, "validation_data")
        test_dir = os.path.join(data_dir, "test_data")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"split_dir": train_dir},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"split_dir": validation_dir},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"split_dir": test_dir},
            ),
        ]

    def _get_subfolders(self, base_dir):
        """
        Get all subfolders from the base directory.
        """
        return [
            os.path.join(base_dir, subfolder)
            for subfolder in os.listdir(base_dir)
            if os.path.isdir(os.path.join(base_dir, subfolder))
        ]

    def _get_year_folders(self, subfolder_dir):
        """
        Get all year folders inside a subfolder.
        """
        return [
            os.path.join(subfolder_dir, year_folder)
            for year_folder in os.listdir(subfolder_dir)
            if os.path.isdir(os.path.join(subfolder_dir, year_folder))
        ]

    def _generate_data_from_files(self, data_dir):
        """
        Generate file paths for each subfolder, year, and daily file.
        """
        example_id = 0

        # Loop through subfolders
        for subfolder in os.listdir(data_dir):
            subfolder_path = os.path.join(data_dir, subfolder)

            if os.path.isdir(subfolder_path):
                # Loop through year folders inside the subfolder
                for year_folder in os.listdir(subfolder_path):
                    year_folder_path = os.path.join(subfolder_path, year_folder)

                    if os.path.isdir(year_folder_path):
                        # Loop through daily files inside the year folder
                        for daily_file in os.listdir(year_folder_path):
                            daily_file_path = os.path.join(year_folder_path, daily_file)

                            # Only yield NetCDF files; other entries are skipped
                            # (raising FileNotFoundError here, as an earlier draft
                            # did, would abort generation on the first non-.nc file)
                            if daily_file.endswith(".nc"):
                                # Yield file information for each data point
                                yield example_id, {
                                    "file_path": daily_file_path,
                                    "year": year_folder,
                                    "subfolder": subfolder,
                                }
                                example_id += 1

    def _generate_examples(self, split_dir):
        """
        Generates examples for the dataset from the split directory.
        """
        # Call the data generator to get the file paths
        for example_id, example in self._generate_data_from_files(split_dir):
            yield example_id, example
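
For readers skimming the diff, this is the directory layout the script expects, reconstructed from _split_generators and _generate_data_from_files (subfolder names like sf1/sf2 come from the feature comments and may differ in the actual data):

    long_term_precipitation_forecast/
        training_data/
            <subfolder>/        # one folder per variable, e.g. sf1, sf2
                <year>/         # one folder per year
                    *.nc        # daily NetCDF files
        validation_data/        # same nested structure
        test_data/              # same nested structure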
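Since the examples carry file paths rather than array data, reading the actual precipitation fields is left to the consumer. A sketch of one way to do it, assuming xarray and a NetCDF backend (e.g. netCDF4) are installed; neither is required or referenced by the commit itself:

    import xarray as xr
    from datasets import load_dataset

    ds = load_dataset(
        "long_term_precipitation_forecast/dataset.py",
        split="train",
        trust_remote_code=True,
    )

    # Resolve the first example's path and open the daily NetCDF file.
    with xr.open_dataset(ds[0]["file_path"]) as nc:
        print(nc)  # lists the variables and coordinates stored for that day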