"""TODO: Add a description here.""" import csv import json import os import numpy as np from pathlib import Path import datasets # TODO: Add BibTeX citation # Find for instance the citation on arxiv or on the dataset repo/website _CITATION = """\ @article{Li_2008, title={A public turbulence database cluster and applications to study Lagrangian evolution of velocity increments in turbulence}, volume={9}, ISSN={1468-5248}, url={http://dx.doi.org/10.1080/14685240802376389}, DOI={10.1080/14685240802376389}, journal={Journal of Turbulence}, publisher={Informa UK Limited}, author={Li, Yi and Perlman, Eric and Wan, Minping and Yang, Yunke and Meneveau, Charles and Burns, Randal and Chen, Shiyi and Szalay, Alexander and Eyink, Gregory}, year={2008}, month=jan, pages={N31} } """ # TODO: Add description of the dataset here # You can copy an official description _DESCRIPTION = """\ This new dataset is designed to solve this great NLP task and is crafted with a lot of care. """ # TODO: Add a link to an official homepage for the dataset here _HOMEPAGE = "" # TODO: Add the licence for the dataset here if you can find it _LICENSE = "" _BASE_URL = "https://huggingface.co/datasets/dl2-g32/jhtdb/resolve/main" _URLS = { "small_50": { "train": ( "datasets/jhtdb/small_50/metadata_train.csv", "datasets/jhtdb/small_50/train.zip", ), "val": ( "datasets/jhtdb/small_50/metadata_val.csv", "datasets/jhtdb/small_50/val.zip", ), "test": ( "datasets/jhtdb/small_50/metadata_test.csv", "datasets/jhtdb/small_50/test.zip", ), }, "large_50": { "train": ( "datasets/jhtdb/large_50/metadata_train.csv", "datasets/jhtdb/large_50/train.zip", ), "val": ( "datasets/jhtdb/large_50/metadata_val.csv", "datasets/jhtdb/large_50/val.zip", ), "test": ( "datasets/jhtdb/large_50/metadata_test.csv", "datasets/jhtdb/large_50/test.zip", ), }, "large_100": { "train": ( "datasets/jhtdb/large_100/metadata_train.csv", "datasets/jhtdb/large_100/train.zip", ), "val": ( "datasets/jhtdb/large_100/metadata_val.csv", "datasets/jhtdb/large_100/val.zip", ), "test": ( "datasets/jhtdb/large_100/metadata_test.csv", "datasets/jhtdb/large_100/test.zip", ), }, } class JHTDB(datasets.GeneratorBasedBuilder): """TODO: Short description of my dataset.""" VERSION = datasets.Version("1.1.0") BUILDER_CONFIGS = [ datasets.BuilderConfig(name="small_50", version=VERSION, description=""), datasets.BuilderConfig(name="large_50", version=VERSION, description=""), datasets.BuilderConfig(name="large_100", version=VERSION, description=""), ] DEFAULT_CONFIG_NAME = "large_50" def _info(self): if self.config.name.startswith("small"): features = datasets.Features( { "lrs": datasets.Sequence( datasets.Array4D(shape=(3, 4, 4, 4), dtype="float32"), ), "hr": datasets.Array4D(shape=(3, 16, 16, 16), dtype="float32"), } ) elif self.config.name.startswith("large"): features = datasets.Features( { "lrs": datasets.Sequence( datasets.Array4D(shape=(3, 16, 16, 16), dtype="float32"), ), "hr": datasets.Array4D(shape=(3, 64, 64, 64), dtype="float32"), } ) return datasets.DatasetInfo( description=_DESCRIPTION, features=features, homepage=_HOMEPAGE, license=_LICENSE, citation=_CITATION, ) def _split_generators(self, dl_manager): urls = _URLS[self.config.name] urls = { k: (f"{_BASE_URL}/{v[0]}", f"{_BASE_URL}/{v[1]}") for k, v in urls.items() } data_dir = dl_manager.download_and_extract(urls) named_splits = { "train": datasets.Split.TRAIN, "val": datasets.Split.VALIDATION, "test": datasets.Split.TEST, } return [ datasets.SplitGenerator( name=named_splits[split], gen_kwargs={ "metadata_path": 
                    "data_path": Path(data_path),
                },
            )
            for split, (metadata_path, data_path) in data_dir.items()
        ]

    def _generate_examples(self, metadata_path: Path, data_path: Path):
        # Each metadata row lists the low-resolution frames (a JSON-encoded list
        # of paths) and a single high-resolution target, stored as .npy files
        # inside the extracted archive.
        with open(metadata_path) as f:
            reader = csv.DictReader(f)
            for key, data in enumerate(reader):
                yield key, {
                    "lrs": [
                        np.load(data_path / Path(p).name)
                        for p in json.loads(data["lr_paths"])
                    ],
                    "hr": np.load(data_path / Path(data["hr_path"]).name),
                }
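

# ---------------------------------------------------------------------------
# Minimal usage sketch (an assumption for illustration, not part of the
# builder itself): passing this script's path to `datasets.load_dataset`
# builds one config locally and exposes the "lrs"/"hr" arrays per example.
# It needs network access to the Hugging Face Hub, and the keyword arguments
# accepted by `load_dataset` can differ between `datasets` releases.
if __name__ == "__main__":
    ds = datasets.load_dataset(__file__, name="large_50", split="train")
    example = ds[0]
    # Number of low-resolution frames and the shape of the high-resolution target.
    print(len(example["lrs"]), np.asarray(example["hr"]).shape)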