import json
import zipfile
from itertools import chain
from pathlib import Path

import numpy as np
import pandas as pd
import pyJHTDB
import pyJHTDB.dbinfo
from jsonargparse import CLI
from tqdm import tqdm

def get_filename(
    time_step: int,
    start: np.ndarray,
    end: np.ndarray,
    step: np.ndarray,
    filter_width: int,
) -> str:
    """Serialize JHTDB cutout parameters into a filename."""
    return "{0}_{1}_{2}_{3}_{4}_{5}_{6}_{7}_{8}_{9}_{10}.npy".format(
        time_step,
        start[0],
        start[1],
        start[2],
        end[0],
        end[1],
        end[2],
        step[0],
        step[1],
        step[2],
        filter_width,
    )
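
# Illustrative example (not executed):
#   >>> get_filename(5, np.array([1, 1, 1]), np.array([64, 64, 64]),
#   ...              np.array([1, 1, 1]), 1)
#   '5_1_1_1_64_64_64_1_1_1_1.npy'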

def download_jhtdb(
    loader: pyJHTDB.libJHTDB,
    time_step: int,
    start: np.ndarray,
    end: np.ndarray,
    step: np.ndarray,
    filter_width: int,
    path: Path,
    dataset: str = "isotropic1024coarse",
    field: str = "u",
) -> np.ndarray:
    """Download a single cutout from JHTDB, caching the result on disk.

    :param loader: pyJHTDB.libJHTDB object
    :param time_step: time step to download
    :param start: start [x, y, z] of the cutout
    :param end: end [x, y, z] of the cutout
    :param step: step size of the cutout
    :param filter_width: filter width of the cutout
    :param path: path to save the data
    :param dataset: dataset to download from. Default is "isotropic1024coarse"
    :param field: velocity ("u") or pressure ("p") field
    :return: the cutout as an array with the field components first
    """
    if not path.exists():
        results: np.ndarray = loader.getCutout(
            data_set=dataset,
            field=field,
            time_step=time_step,
            start=start,
            end=end,
            step=step,
            filter_width=filter_width,
        )
        if results is None:
            raise RuntimeError("Could not download data from JHTDB")
        # Move the field-component axis to the front: (..., 3) -> (3, ...).
        results = np.moveaxis(results, -1, 0)
        np.save(path, results)
    return np.load(path)
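
# Minimal sketch of a single cutout download (assumes the free JHTDB
# testing token; `path` acts as a cache, so repeated calls reuse the file):
#
#   loader = pyJHTDB.libJHTDB()
#   loader.initialize()
#   loader.add_token("edu.jhu.pha.turbulence.testing-201311")
#   field = download_jhtdb(
#       loader=loader,
#       time_step=1,
#       start=np.array([1, 1, 1], dtype=int),
#       end=np.array([16, 16, 16], dtype=int),
#       step=np.array([1, 1, 1], dtype=int),
#       filter_width=1,
#       path=Path("tmp_cutout.npy"),
#   )
#   # field.shape should be (3, 16, 16, 16) for the velocity field "u".
#   loader.finalize()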

def download_all(params: list[dict], loader: pyJHTDB.libJHTDB):
    """Download all the data from the JHTDB database.

    TODO: parallelize this function
    """
    for p in tqdm(params):
        download_jhtdb(loader=loader, **p)

def get_params(
    total_samples: int,
    domain_size: int,
    lr_factor: int,
    time_range: list[int],
    window_size: int,
) -> tuple[list[list[dict]], list[dict]]:
    """Sample random cutout parameters for paired low/high-res downloads.

    For each sample, one high-res parameter set is drawn at a random time
    step and position, and window_size low-res parameter sets are built
    around the same time step (offsets -window_size // 2 .. window_size // 2).
    """
    dt = np.arange(window_size) - window_size // 2
    time_steps_hr = np.random.randint(time_range[0], time_range[1], size=total_samples)

    time_steps_lr = np.repeat(time_steps_hr[:, np.newaxis], len(dt), axis=1) + dt

    starts = np.random.randint(1, 1024 - domain_size, size=(total_samples, 3))
    ends = starts + domain_size - 1
    all_params_lr = [
        [
            {
                "time_step": time_steps_lr[i, j],
                "start": starts[i],
                "end": ends[i],
                "step": np.full(3, lr_factor, dtype=int),
                "filter_width": lr_factor,
            }
            for j in range(len(dt))
        ]
        for i in range(total_samples)
    ]
    all_params_hr = [
        {
            "time_step": time_steps_hr[i],
            "start": starts[i],
            "end": ends[i],
            "step": np.ones(3, dtype=int),
            "filter_width": 1,
        }
        for i in range(total_samples)
    ]
    return all_params_lr, all_params_hr
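
# Structure of the returned parameters (illustrative, window_size=3):
#
#   all_params_lr, all_params_hr = get_params(
#       total_samples=2, domain_size=64, lr_factor=4,
#       time_range=[10, 100], window_size=3,
#   )
#   len(all_params_hr) == 2      # one parameter dict per sample
#   len(all_params_lr) == 2      # one list of dicts per sample...
#   len(all_params_lr[0]) == 3   # ...covering window_size time steps
#   # centred on the matching high-res time step.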

def download_generic(
    total_samples: int,
    domain_size: int,
    lr_factor: int,
    time_range: tuple[int, int],
    window_size: int,
    tmp_data_dir: Path,
    token: str,
):
    """Download all the data from the JHTDB database."""
    lJHTDB = pyJHTDB.libJHTDB()
    lJHTDB.initialize()
    lJHTDB.add_token(token)
    tmp_data_dir.mkdir(parents=True, exist_ok=True)

    all_params_lr, all_params_hr = get_params(
        total_samples, domain_size, lr_factor, time_range, window_size
    )

    # Attach a cache path (derived from the parameters) to every parameter set.
    all_params_hr = [
        dict(p, path=tmp_data_dir / get_filename(**p)) for p in all_params_hr
    ]
    all_params_lr = [
        [dict(p, path=tmp_data_dir / get_filename(**p)) for p in lr]
        for lr in all_params_lr
    ]

    all_params = list(chain.from_iterable(all_params_lr)) + all_params_hr
    download_all(all_params, lJHTDB)
    return all_params_lr, all_params_hr, all_params
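
# Hedged usage sketch (rate-limited when using JHTDB's free testing token):
#
#   params_lr, params_hr, _ = download_generic(
#       total_samples=4, domain_size=32, lr_factor=4,
#       time_range=(2, 1023), window_size=3,
#       tmp_data_dir=Path("dataset/jhtdb/tmp"),
#       token="edu.jhu.pha.turbulence.testing-201311",
#   )
#   # params_hr[0]["path"] points at the cached high-res .npy cutout.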

def make_jhtdb_dataset(
    name: str,
    total_samples: int = 128,
    train_split: float = 0.8,
    val_split: float = 0.1,
    test_split: float = 0.1,
    domain_size: int = 64,
    lr_factor: int = 4,
    root: Path = Path("dataset/jhtdb"),
    time_range: tuple[int, int] = (2, 1023),
    window_size: int = 3,
    seed: int = 123,
    token: str = "edu.jhu.pha.turbulence.testing-201311",
) -> None:
    """Create a paired low/high-res dataset from the JHTDB database.

    Writes one zip archive and one metadata CSV per split under root/name.
    Each high-res sample is stored as an array of shape
    [3, domain_size, domain_size, domain_size], and each of its window_size
    low-res frames as [3, domain_size // lr_factor, domain_size // lr_factor,
    domain_size // lr_factor], where 3 corresponds to the x, y, z components
    of the velocity field.

    :param name: name of the dataset
    :param total_samples: total number of samples to generate
    :param train_split: fraction of samples to use for training
    :param val_split: fraction of samples to use for validation
    :param test_split: fraction of samples to use for testing
    :param domain_size: size of the domain to generate
    :param lr_factor: factor to downsample the data
    :param root: root directory to store the dataset
    :param time_range: range of time steps to sample from
    :param window_size: size of the temporal window around each sample
    :param seed: seed to generate the dataset
    :param token: token to access the JHTDB database
    """
    assert window_size % 2 == 1, "Window size must be odd"
    assert time_range[0] - window_size // 2 >= 1, "Time step out of range"
    assert time_range[1] + window_size // 2 <= 1024, "Time step out of range"

    np.random.seed(seed)

    tmp_data_dir = root / "tmp"
    all_params_lr, all_params_hr, _ = download_generic(
        total_samples,
        domain_size,
        lr_factor,
        time_range,
        window_size,
        tmp_data_dir,
        token,
    )
    assert len(all_params_lr) == len(all_params_hr), "Length mismatch"

    cur_root = root / name
    cur_root.mkdir(parents=True, exist_ok=True)
    splits_ratios = [("train", train_split), ("val", val_split), ("test", test_split)]

    # Slice consecutive, non-overlapping ranges of samples for train/val/test.
    offset = 0
    for split, split_ratio in splits_ratios:
        split_size = int(total_samples * split_ratio)
        split_dir = cur_root / split
        split_params_lr = all_params_lr[offset : offset + split_size]
        split_params_hr = all_params_hr[offset : offset + split_size]
        offset += split_size

        split_paths = [
            p["path"]
            for p in split_params_hr + list(chain.from_iterable(split_params_lr))
        ]
        with zipfile.ZipFile(cur_root / f"{split}.zip", "w") as z:
            for p in split_paths:
                z.write(p, p.name)

        metadata = []
        for lr, hr in zip(split_params_lr, split_params_hr):
            metadata.append(
                {
                    "time_step": hr["time_step"],
                    "window_size": window_size,
                    "sx": hr["start"][0],
                    "sy": hr["start"][1],
                    "sz": hr["start"][2],
                    "ex": hr["end"][0],
                    "ey": hr["end"][1],
                    "ez": hr["end"][2],
                    "lr_factor": lr_factor,
                    "hr_path": str(split_dir / hr["path"].name),
                    "lr_paths": json.dumps(
                        [str(split_dir / p["path"].name) for p in lr]
                    ),
                }
            )

        metadata_df = pd.DataFrame(metadata)
        metadata_df.to_csv(cur_root / f"metadata_{split}.csv", index=False)
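
# Reading a split back (sketch): `lr_paths` is stored as a JSON-encoded
# list, so decode it after loading the CSV, e.g.
#
#   df = pd.read_csv("dataset/jhtdb/<name>/metadata_train.csv")
#   lr_paths = json.loads(df.loc[0, "lr_paths"])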

if __name__ == "__main__":
    CLI(make_jhtdb_dataset)
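
# Hedged CLI usage (jsonargparse exposes every parameter of
# make_jhtdb_dataset as a --flag; assuming this file is saved as
# make_jhtdb_dataset.py):
#
#   python make_jhtdb_dataset.py --name my_dataset --total_samples 8 \
#       --domain_size 32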